      1 /*	$NetBSD: if_wm.c,v 1.519 2017/07/12 08:15:31 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
      76  *	- Tx multi-queue improvement (refine queue selection logic)
      77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.519 2017/07/12 08:15:31 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
     157 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
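         /*
          * Note the extra parentheses around DPRINTF's second argument; a
          * representative call (illustrative only, assuming a softc "sc" in
          * scope) looks like:
          *
          *	DPRINTF(WM_DEBUG_LINK,
          *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
          */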
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    165 #else
    166 #define CALLOUT_FLAGS	0
    167 #endif
    168 
    169 /*
     170  * The maximum number of interrupts this driver uses: one per queue plus one for link.
    171  */
    172 #define WM_MAX_NQUEUEINTR	16
    173 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    174 
    175 #ifndef WM_DISABLE_MSI
    176 #define	WM_DISABLE_MSI 0
    177 #endif
    178 #ifndef WM_DISABLE_MSIX
    179 #define	WM_DISABLE_MSIX 0
    180 #endif
    181 
    182 int wm_disable_msi = WM_DISABLE_MSI;
    183 int wm_disable_msix = WM_DISABLE_MSIX;
    184 
    185 /*
    186  * Transmit descriptor list size.  Due to errata, we can only have
    187  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    188  * on >= 82544.  We tell the upper layers that they can queue a lot
    189  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    190  * of them at a time.
    191  *
    192  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    193  * chains containing many small mbufs have been observed in zero-copy
    194  * situations with jumbo frames.
    195  */
    196 #define	WM_NTXSEGS		256
    197 #define	WM_IFQUEUELEN		256
    198 #define	WM_TXQUEUELEN_MAX	64
    199 #define	WM_TXQUEUELEN_MAX_82547	16
    200 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    201 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    202 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    203 #define	WM_NTXDESC_82542	256
    204 #define	WM_NTXDESC_82544	4096
    205 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    206 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    207 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    208 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    209 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
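         /*
          * Illustrative arithmetic (not driver code): because the descriptor
          * and job counts are powers of two, "((x) + 1) & (count - 1)" above
          * equals "((x) + 1) % count".  E.g. with 4096 descriptors,
          * WM_NEXTTX() maps 4095 to (4096 & 4095) == 0, wrapping the ring.
          */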
    210 
    211 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
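         /*
          * Worked example (assuming 4 KiB pages): IP_MAXPACKET is 65535, so
          * round_page(IP_MAXPACKET) == 65536 and WM_MAXTXDMA == 131072, i.e.
          * twice the largest possible TSO payload.
          */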
    212 
    213 #define	WM_TXINTERQSIZE		256
    214 
    215 /*
    216  * Receive descriptor list size.  We have one Rx buffer for normal
    217  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    218  * packet.  We allocate 256 receive descriptors, each with a 2k
    219  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    220  */
    221 #define	WM_NRXDESC		256
    222 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    223 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    224 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
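         /*
          * Illustrative arithmetic: a ~9k jumbo frame spans
          * ceil(9018 / MCLBYTES) == ceil(9018 / 2048) == 5 buffers, and
          * 256 / 5 == 51, hence the "room for 50 jumbo packets" above
          * (9018 is one common jumbo frame size; exact figures vary).
          */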
    225 
    226 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    227 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    228 #endif
    229 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    230 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    231 #endif
    232 
    233 typedef union txdescs {
    234 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    235 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    236 } txdescs_t;
    237 
    238 typedef union rxdescs {
    239 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    240 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    241 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    242 } rxdescs_t;
    243 
    244 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    245 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    246 
    247 /*
    248  * Software state for transmit jobs.
    249  */
    250 struct wm_txsoft {
    251 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    252 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    253 	int txs_firstdesc;		/* first descriptor in packet */
    254 	int txs_lastdesc;		/* last descriptor in packet */
    255 	int txs_ndesc;			/* # of descriptors used */
    256 };
    257 
    258 /*
    259  * Software state for receive buffers.  Each descriptor gets a
    260  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    261  * more than one buffer, we chain them together.
    262  */
    263 struct wm_rxsoft {
    264 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    265 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    266 };
    267 
    268 #define WM_LINKUP_TIMEOUT	50
    269 
    270 static uint16_t swfwphysem[] = {
    271 	SWFW_PHY0_SM,
    272 	SWFW_PHY1_SM,
    273 	SWFW_PHY2_SM,
    274 	SWFW_PHY3_SM
    275 };
    276 
    277 static const uint32_t wm_82580_rxpbs_table[] = {
    278 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    279 };
    280 
    281 struct wm_softc;
    282 
    283 #ifdef WM_EVENT_COUNTERS
    284 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    285 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    286 	struct evcnt qname##_ev_##evname;
    287 
    288 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
     289 	do {								\
    290 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    291 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    292 		    "%s%02d%s", #qname, (qnum), #evname);		\
    293 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    294 		    (evtype), NULL, (xname),				\
    295 		    (q)->qname##_##evname##_evcnt_name);		\
     296 	} while (/*CONSTCOND*/0)
    297 
    298 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    299 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    300 
    301 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    302 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    303 
    304 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    305 	evcnt_detach(&(q)->qname##_ev_##evname);
    306 #endif /* WM_EVENT_COUNTERS */
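         /*
          * Illustrative expansion (not driver code):
          * WM_Q_EVCNT_DEFINE(txq, txdw) declares
          *
          *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
          *	struct evcnt txq_ev_txdw;
          *
          * (the ## marks inside the string literal are not pasted; the
          * literal merely sizes the buffer) and
          * WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) then snprintf()s
          * the name "txq00txdw" into that buffer and registers the counter
          * via evcnt_attach_dynamic(9).
          */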
    307 
    308 struct wm_txqueue {
    309 	kmutex_t *txq_lock;		/* lock for tx operations */
    310 
    311 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    312 
    313 	/* Software state for the transmit descriptors. */
    314 	int txq_num;			/* must be a power of two */
    315 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    316 
    317 	/* TX control data structures. */
    318 	int txq_ndesc;			/* must be a power of two */
     319 	size_t txq_descsize;		/* size of one Tx descriptor */
     320 	txdescs_t *txq_descs_u;
     321 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
     322 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     323 	int txq_desc_rseg;		/* real number of control segments */
    324 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    325 #define	txq_descs	txq_descs_u->sctxu_txdescs
    326 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    327 
    328 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    329 
    330 	int txq_free;			/* number of free Tx descriptors */
    331 	int txq_next;			/* next ready Tx descriptor */
    332 
    333 	int txq_sfree;			/* number of free Tx jobs */
    334 	int txq_snext;			/* next free Tx job */
    335 	int txq_sdirty;			/* dirty Tx jobs */
    336 
    337 	/* These 4 variables are used only on the 82547. */
    338 	int txq_fifo_size;		/* Tx FIFO size */
    339 	int txq_fifo_head;		/* current head of FIFO */
    340 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    341 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    342 
     343 	/*
     344 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple CPUs;
     345 	 * this pcq mediates between them without blocking (usage sketch below).
     346 	 */
    347 	pcq_t *txq_interq;
    348 
    349 	/*
     350 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags, to
     351 	 * manage the Tx H/W queue's busy flag.
    352 	 */
    353 	int txq_flags;			/* flags for H/W queue, see below */
    354 #define	WM_TXQ_NO_SPACE	0x1
    355 
    356 	bool txq_stopping;
    357 
    358 	uint32_t txq_packets;		/* for AIM */
    359 	uint32_t txq_bytes;		/* for AIM */
    360 #ifdef WM_EVENT_COUNTERS
    361 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    362 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    363 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    364 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    365 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    366 						/* XXX not used? */
    367 
    368 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     369 	WM_Q_EVCNT_DEFINE(txq, txtusum)	/* TCP/UDP cksums comp. out-bound */
    370 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    371 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    372 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    373 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    374 
     375 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
    376 
    377 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    378 
    379 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    380 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    381 #endif /* WM_EVENT_COUNTERS */
    382 };
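         /*
          * Illustrative sketch (not driver code) of the txq_interq handoff
          * mentioned above; "txq" and "m" are placeholders and error
          * handling is abbreviated.  A sender that does not own txq_lock
          * enqueues on the lockless pcq(9); the queue owner later drains it
          * under txq_lock:
          */
         #if 0
         	if (!pcq_put(txq->txq_interq, m)) {
         		m_freem(m);	/* pcq full: drop the packet */
         		return ENOBUFS;
         	}

         	/* consumer side, with txq_lock held: */
         	while ((m = pcq_get(txq->txq_interq)) != NULL) {
         		/* encapsulate m and hand it to the hardware */
         	}
         #endif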
    383 
    384 struct wm_rxqueue {
    385 	kmutex_t *rxq_lock;		/* lock for rx operations */
    386 
    387 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    388 
    389 	/* Software state for the receive descriptors. */
    390 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    391 
    392 	/* RX control data structures. */
    393 	int rxq_ndesc;			/* must be a power of two */
     394 	size_t rxq_descsize;		/* size of one Rx descriptor */
     395 	rxdescs_t *rxq_descs_u;
     396 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
     397 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     398 	int rxq_desc_rseg;		/* real number of control segments */
    399 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    400 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    401 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    402 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    403 
    404 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    405 
    406 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    407 	int rxq_discard;
    408 	int rxq_len;
    409 	struct mbuf *rxq_head;
    410 	struct mbuf *rxq_tail;
    411 	struct mbuf **rxq_tailp;
    412 
    413 	bool rxq_stopping;
    414 
    415 	uint32_t rxq_packets;		/* for AIM */
    416 	uint32_t rxq_bytes;		/* for AIM */
    417 #ifdef WM_EVENT_COUNTERS
     418 	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */
     419 
     420 	WM_Q_EVCNT_DEFINE(rxq, rxipsum)	/* IP checksums checked in-bound */
     421 	WM_Q_EVCNT_DEFINE(rxq, rxtusum)	/* TCP/UDP cksums checked in-bound */
    422 #endif
    423 };
    424 
    425 struct wm_queue {
    426 	int wmq_id;			/* index of transmit and receive queues */
     427 	int wmq_intr_idx;		/* index into the MSI-X table */
    428 
    429 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    430 	bool wmq_set_itr;
    431 
    432 	struct wm_txqueue wmq_txq;
    433 	struct wm_rxqueue wmq_rxq;
    434 
    435 	void *wmq_si;
    436 };
    437 
    438 struct wm_phyop {
    439 	int (*acquire)(struct wm_softc *);
    440 	void (*release)(struct wm_softc *);
    441 	int reset_delay_us;
    442 };
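         /*
          * Illustrative use of the wm_phyop hooks (not a complete code
          * path; error handling abbreviated): PHY register access is
          * bracketed by the chip-specific semaphore operations.
          */
         #if 0
         	if (sc->phy.acquire(sc) != 0)
         		return;		/* failed to get the semaphore */
         	/* ... access PHY registers ... */
         	sc->phy.release(sc);
         #endif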
    443 
    444 /*
    445  * Software state per device.
    446  */
    447 struct wm_softc {
    448 	device_t sc_dev;		/* generic device information */
    449 	bus_space_tag_t sc_st;		/* bus space tag */
    450 	bus_space_handle_t sc_sh;	/* bus space handle */
    451 	bus_size_t sc_ss;		/* bus space size */
    452 	bus_space_tag_t sc_iot;		/* I/O space tag */
    453 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    454 	bus_size_t sc_ios;		/* I/O space size */
    455 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    456 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    457 	bus_size_t sc_flashs;		/* flash registers space size */
    458 	off_t sc_flashreg_offset;	/*
    459 					 * offset to flash registers from
    460 					 * start of BAR
    461 					 */
    462 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    463 
    464 	struct ethercom sc_ethercom;	/* ethernet common data */
    465 	struct mii_data sc_mii;		/* MII/media information */
    466 
    467 	pci_chipset_tag_t sc_pc;
    468 	pcitag_t sc_pcitag;
    469 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    470 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    471 
    472 	uint16_t sc_pcidevid;		/* PCI device ID */
    473 	wm_chip_type sc_type;		/* MAC type */
    474 	int sc_rev;			/* MAC revision */
    475 	wm_phy_type sc_phytype;		/* PHY type */
    476 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    477 #define	WM_MEDIATYPE_UNKNOWN		0x00
    478 #define	WM_MEDIATYPE_FIBER		0x01
    479 #define	WM_MEDIATYPE_COPPER		0x02
    480 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    481 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    482 	int sc_flags;			/* flags; see below */
    483 	int sc_if_flags;		/* last if_flags */
    484 	int sc_flowflags;		/* 802.3x flow control flags */
    485 	int sc_align_tweak;
    486 
    487 	void *sc_ihs[WM_MAX_NINTR];	/*
    488 					 * interrupt cookie.
    489 					 * - legacy and msi use sc_ihs[0] only
    490 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    491 					 */
    492 	pci_intr_handle_t *sc_intrs;	/*
    493 					 * legacy and msi use sc_intrs[0] only
     494 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    495 					 */
    496 	int sc_nintrs;			/* number of interrupts */
    497 
     498 	int sc_link_intr_idx;		/* index into the MSI-X table */
    499 
    500 	callout_t sc_tick_ch;		/* tick callout */
    501 	bool sc_core_stopping;
    502 
    503 	int sc_nvm_ver_major;
    504 	int sc_nvm_ver_minor;
    505 	int sc_nvm_ver_build;
    506 	int sc_nvm_addrbits;		/* NVM address bits */
    507 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    508 	int sc_ich8_flash_base;
    509 	int sc_ich8_flash_bank_size;
    510 	int sc_nvm_k1_enabled;
    511 
    512 	int sc_nqueues;
    513 	struct wm_queue *sc_queue;
    514 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    515 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    516 
    517 	int sc_affinity_offset;
    518 
    519 #ifdef WM_EVENT_COUNTERS
    520 	/* Event counters. */
    521 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    522 
     523 	/* WM_T_82542_2_1 only */
     524 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
     525 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
     526 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
     527 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
     528 	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported MAC control frames */
    529 #endif /* WM_EVENT_COUNTERS */
    530 
     531 	/* This variable is used only on the 82547. */
    532 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    533 
    534 	uint32_t sc_ctrl;		/* prototype CTRL register */
    535 #if 0
    536 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    537 #endif
    538 	uint32_t sc_icr;		/* prototype interrupt bits */
    539 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    540 	uint32_t sc_tctl;		/* prototype TCTL register */
    541 	uint32_t sc_rctl;		/* prototype RCTL register */
    542 	uint32_t sc_txcw;		/* prototype TXCW register */
    543 	uint32_t sc_tipg;		/* prototype TIPG register */
    544 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    545 	uint32_t sc_pba;		/* prototype PBA register */
    546 
    547 	int sc_tbi_linkup;		/* TBI link status */
    548 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    549 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    550 
    551 	int sc_mchash_type;		/* multicast filter offset */
    552 
    553 	krndsource_t rnd_source;	/* random source */
    554 
    555 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    556 
    557 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    558 	kmutex_t *sc_ich_phymtx;	/*
    559 					 * 82574/82583/ICH/PCH specific PHY
    560 					 * mutex. For 82574/82583, the mutex
    561 					 * is used for both PHY and NVM.
    562 					 */
    563 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    564 
    565 	struct wm_phyop phy;
    566 };
    567 
    568 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    569 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    570 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    571 
    572 #define	WM_RXCHAIN_RESET(rxq)						\
    573 do {									\
    574 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    575 	*(rxq)->rxq_tailp = NULL;					\
    576 	(rxq)->rxq_len = 0;						\
    577 } while (/*CONSTCOND*/0)
    578 
    579 #define	WM_RXCHAIN_LINK(rxq, m)						\
    580 do {									\
    581 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    582 	(rxq)->rxq_tailp = &(m)->m_next;				\
    583 } while (/*CONSTCOND*/0)
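         /*
          * Illustrative walk-through (not driver code): after
          * WM_RXCHAIN_RESET, rxq_tailp points at rxq_head and the chain is
          * empty.  Each WM_RXCHAIN_LINK(rxq, m) stores m through rxq_tailp
          * (appending it) and advances rxq_tailp to &m->m_next, so a frame
          * spanning several 2k buffers accumulates as a single mbuf chain
          * headed by rxq_head.
          */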
    584 
    585 #ifdef WM_EVENT_COUNTERS
    586 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    587 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    588 
    589 #define WM_Q_EVCNT_INCR(qname, evname)			\
    590 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    591 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    592 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    593 #else /* !WM_EVENT_COUNTERS */
    594 #define	WM_EVCNT_INCR(ev)	/* nothing */
    595 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    596 
    597 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    598 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    599 #endif /* !WM_EVENT_COUNTERS */
    600 
    601 #define	CSR_READ(sc, reg)						\
    602 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    603 #define	CSR_WRITE(sc, reg, val)						\
    604 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    605 #define	CSR_WRITE_FLUSH(sc)						\
    606 	(void) CSR_READ((sc), WMREG_STATUS)
    607 
    608 #define ICH8_FLASH_READ32(sc, reg)					\
    609 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    610 	    (reg) + sc->sc_flashreg_offset)
    611 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    612 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    613 	    (reg) + sc->sc_flashreg_offset, (data))
    614 
    615 #define ICH8_FLASH_READ16(sc, reg)					\
    616 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    617 	    (reg) + sc->sc_flashreg_offset)
    618 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    619 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    620 	    (reg) + sc->sc_flashreg_offset, (data))
    621 
    622 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    623 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    624 
    625 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    626 #define	WM_CDTXADDR_HI(txq, x)						\
    627 	(sizeof(bus_addr_t) == 8 ?					\
    628 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    629 
    630 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    631 #define	WM_CDRXADDR_HI(rxq, x)						\
    632 	(sizeof(bus_addr_t) == 8 ?					\
    633 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
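         /*
          * Worked example (illustrative): with a 64-bit bus_addr_t of
          * 0x0000001234567890, WM_CDTXADDR_LO() yields 0x34567890 and
          * WM_CDTXADDR_HI() yields 0x12, the two 32-bit halves written into
          * a descriptor base address register.  With a 32-bit bus_addr_t
          * the high half is always 0.
          */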
    634 
    635 /*
     636  * Register read/write functions
     637  * other than CSR_{READ|WRITE}().
    638  */
    639 #if 0
    640 static inline uint32_t wm_io_read(struct wm_softc *, int);
    641 #endif
    642 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    643 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    644 	uint32_t, uint32_t);
    645 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    646 
    647 /*
    648  * Descriptor sync/init functions.
    649  */
    650 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    651 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    652 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    653 
    654 /*
    655  * Device driver interface functions and commonly used functions.
    656  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    657  */
    658 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    659 static int	wm_match(device_t, cfdata_t, void *);
    660 static void	wm_attach(device_t, device_t, void *);
    661 static int	wm_detach(device_t, int);
    662 static bool	wm_suspend(device_t, const pmf_qual_t *);
    663 static bool	wm_resume(device_t, const pmf_qual_t *);
    664 static void	wm_watchdog(struct ifnet *);
    665 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    666 static void	wm_tick(void *);
    667 static int	wm_ifflags_cb(struct ethercom *);
    668 static int	wm_ioctl(struct ifnet *, u_long, void *);
    669 /* MAC address related */
    670 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    671 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    672 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    673 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    674 static void	wm_set_filter(struct wm_softc *);
    675 /* Reset and init related */
    676 static void	wm_set_vlan(struct wm_softc *);
    677 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    678 static void	wm_get_auto_rd_done(struct wm_softc *);
    679 static void	wm_lan_init_done(struct wm_softc *);
    680 static void	wm_get_cfg_done(struct wm_softc *);
    681 static void	wm_phy_post_reset(struct wm_softc *);
    682 static void	wm_initialize_hardware_bits(struct wm_softc *);
    683 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    684 static void	wm_reset_phy(struct wm_softc *);
    685 static void	wm_flush_desc_rings(struct wm_softc *);
    686 static void	wm_reset(struct wm_softc *);
    687 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    688 static void	wm_rxdrain(struct wm_rxqueue *);
    689 static void	wm_rss_getkey(uint8_t *);
    690 static void	wm_init_rss(struct wm_softc *);
    691 static void	wm_adjust_qnum(struct wm_softc *, int);
    692 static inline bool	wm_is_using_msix(struct wm_softc *);
    693 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    694 static int	wm_softint_establish(struct wm_softc *, int, int);
    695 static int	wm_setup_legacy(struct wm_softc *);
    696 static int	wm_setup_msix(struct wm_softc *);
    697 static int	wm_init(struct ifnet *);
    698 static int	wm_init_locked(struct ifnet *);
    699 static void	wm_turnon(struct wm_softc *);
    700 static void	wm_turnoff(struct wm_softc *);
    701 static void	wm_stop(struct ifnet *, int);
    702 static void	wm_stop_locked(struct ifnet *, int);
    703 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    704 static void	wm_82547_txfifo_stall(void *);
    705 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    706 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    707 /* DMA related */
    708 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    709 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    710 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    711 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    712     struct wm_txqueue *);
    713 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    714 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    715 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    716     struct wm_rxqueue *);
    717 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    718 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    719 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    720 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    721 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    722 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    723 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    724     struct wm_txqueue *);
    725 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    726     struct wm_rxqueue *);
    727 static int	wm_alloc_txrx_queues(struct wm_softc *);
    728 static void	wm_free_txrx_queues(struct wm_softc *);
    729 static int	wm_init_txrx_queues(struct wm_softc *);
    730 /* Start */
    731 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    732     struct wm_txsoft *, uint32_t *, uint8_t *);
    733 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    734 static void	wm_start(struct ifnet *);
    735 static void	wm_start_locked(struct ifnet *);
    736 static int	wm_transmit(struct ifnet *, struct mbuf *);
    737 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    738 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    739 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    740     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    741 static void	wm_nq_start(struct ifnet *);
    742 static void	wm_nq_start_locked(struct ifnet *);
    743 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    744 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    745 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    746 static void	wm_deferred_start_locked(struct wm_txqueue *);
    747 static void	wm_handle_queue(void *);
    748 /* Interrupt */
    749 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    750 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    751 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    752 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    753 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    754 static void	wm_linkintr(struct wm_softc *, uint32_t);
    755 static int	wm_intr_legacy(void *);
    756 static inline void	wm_txrxintr_disable(struct wm_queue *);
    757 static inline void	wm_txrxintr_enable(struct wm_queue *);
    758 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    759 static int	wm_txrxintr_msix(void *);
    760 static int	wm_linkintr_msix(void *);
    761 
    762 /*
    763  * Media related.
    764  * GMII, SGMII, TBI, SERDES and SFP.
    765  */
    766 /* Common */
    767 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    768 /* GMII related */
    769 static void	wm_gmii_reset(struct wm_softc *);
    770 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    771 static int	wm_get_phy_id_82575(struct wm_softc *);
    772 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    773 static int	wm_gmii_mediachange(struct ifnet *);
    774 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    775 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    776 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    777 static int	wm_gmii_i82543_readreg(device_t, int, int);
    778 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    779 static int	wm_gmii_mdic_readreg(device_t, int, int);
    780 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    781 static int	wm_gmii_i82544_readreg(device_t, int, int);
    782 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    783 static int	wm_gmii_i80003_readreg(device_t, int, int);
    784 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    785 static int	wm_gmii_bm_readreg(device_t, int, int);
    786 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    787 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    788 static int	wm_gmii_hv_readreg(device_t, int, int);
    789 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    790 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    791 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    792 static int	wm_gmii_82580_readreg(device_t, int, int);
    793 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    794 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    795 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    796 static void	wm_gmii_statchg(struct ifnet *);
    797 /*
     798  * Kumeran related (80003, ICH* and PCH*).
     799  * These functions are not for accessing MII registers but for accessing
     800  * Kumeran-specific registers.
    801  */
    802 static int	wm_kmrn_readreg(struct wm_softc *, int);
    803 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    804 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    805 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    806 /* SGMII */
    807 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    808 static int	wm_sgmii_readreg(device_t, int, int);
    809 static void	wm_sgmii_writereg(device_t, int, int, int);
    810 /* TBI related */
    811 static void	wm_tbi_mediainit(struct wm_softc *);
    812 static int	wm_tbi_mediachange(struct ifnet *);
    813 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    814 static int	wm_check_for_link(struct wm_softc *);
    815 static void	wm_tbi_tick(struct wm_softc *);
    816 /* SERDES related */
    817 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    818 static int	wm_serdes_mediachange(struct ifnet *);
    819 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    820 static void	wm_serdes_tick(struct wm_softc *);
    821 /* SFP related */
    822 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    823 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    824 
    825 /*
    826  * NVM related.
    827  * Microwire, SPI (w/wo EERD) and Flash.
    828  */
    829 /* Misc functions */
    830 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    831 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    832 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    833 /* Microwire */
    834 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    835 /* SPI */
    836 static int	wm_nvm_ready_spi(struct wm_softc *);
    837 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     838 /* Read using the EERD register */
    839 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    840 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    841 /* Flash */
    842 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    843     unsigned int *);
    844 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    845 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    846 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    847 	uint32_t *);
    848 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    849 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    850 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    851 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    852 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    853 /* iNVM */
    854 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    855 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
     856 /* Locking, NVM type detection, checksum validation and reading */
    857 static int	wm_nvm_acquire(struct wm_softc *);
    858 static void	wm_nvm_release(struct wm_softc *);
    859 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    860 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    861 static int	wm_nvm_validate_checksum(struct wm_softc *);
    862 static void	wm_nvm_version_invm(struct wm_softc *);
    863 static void	wm_nvm_version(struct wm_softc *);
    864 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    865 
    866 /*
    867  * Hardware semaphores.
     868  * Very complex...
    869  */
    870 static int	wm_get_null(struct wm_softc *);
    871 static void	wm_put_null(struct wm_softc *);
    872 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    873 static void	wm_put_swsm_semaphore(struct wm_softc *);
    874 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    875 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    876 static int	wm_get_phy_82575(struct wm_softc *);
    877 static void	wm_put_phy_82575(struct wm_softc *);
    878 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    879 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    880 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    881 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    882 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    883 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    884 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    885 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    886 
    887 /*
    888  * Management mode and power management related subroutines.
    889  * BMC, AMT, suspend/resume and EEE.
    890  */
    891 #if 0
    892 static int	wm_check_mng_mode(struct wm_softc *);
    893 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    894 static int	wm_check_mng_mode_82574(struct wm_softc *);
    895 static int	wm_check_mng_mode_generic(struct wm_softc *);
    896 #endif
    897 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    898 static bool	wm_phy_resetisblocked(struct wm_softc *);
    899 static void	wm_get_hw_control(struct wm_softc *);
    900 static void	wm_release_hw_control(struct wm_softc *);
    901 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    902 static void	wm_smbustopci(struct wm_softc *);
    903 static void	wm_init_manageability(struct wm_softc *);
    904 static void	wm_release_manageability(struct wm_softc *);
    905 static void	wm_get_wakeup(struct wm_softc *);
    906 static void	wm_ulp_disable(struct wm_softc *);
    907 static void	wm_enable_phy_wakeup(struct wm_softc *);
    908 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    909 static void	wm_enable_wakeup(struct wm_softc *);
    910 /* LPLU (Low Power Link Up) */
    911 static void	wm_lplu_d0_disable(struct wm_softc *);
    912 /* EEE */
    913 static void	wm_set_eee_i350(struct wm_softc *);
    914 
    915 /*
    916  * Workarounds (mainly PHY related).
     917  * Basically, PHY workarounds are in the PHY drivers.
    918  */
    919 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    920 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    921 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    922 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    923 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    924 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    925 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    926 static void	wm_reset_init_script_82575(struct wm_softc *);
    927 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    928 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    929 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    930 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    931 static void	wm_pll_workaround_i210(struct wm_softc *);
    932 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    933 
    934 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    935     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    936 
    937 /*
    938  * Devices supported by this driver.
    939  */
    940 static const struct wm_product {
    941 	pci_vendor_id_t		wmp_vendor;
    942 	pci_product_id_t	wmp_product;
    943 	const char		*wmp_name;
    944 	wm_chip_type		wmp_type;
    945 	uint32_t		wmp_flags;
    946 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    947 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    948 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    949 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    950 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    951 } wm_products[] = {
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    953 	  "Intel i82542 1000BASE-X Ethernet",
    954 	  WM_T_82542_2_1,	WMP_F_FIBER },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    957 	  "Intel i82543GC 1000BASE-X Ethernet",
    958 	  WM_T_82543,		WMP_F_FIBER },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    961 	  "Intel i82543GC 1000BASE-T Ethernet",
    962 	  WM_T_82543,		WMP_F_COPPER },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    965 	  "Intel i82544EI 1000BASE-T Ethernet",
    966 	  WM_T_82544,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    969 	  "Intel i82544EI 1000BASE-X Ethernet",
    970 	  WM_T_82544,		WMP_F_FIBER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    973 	  "Intel i82544GC 1000BASE-T Ethernet",
    974 	  WM_T_82544,		WMP_F_COPPER },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    977 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    978 	  WM_T_82544,		WMP_F_COPPER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    981 	  "Intel i82540EM 1000BASE-T Ethernet",
    982 	  WM_T_82540,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    985 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    986 	  WM_T_82540,		WMP_F_COPPER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    989 	  "Intel i82540EP 1000BASE-T Ethernet",
    990 	  WM_T_82540,		WMP_F_COPPER },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    993 	  "Intel i82540EP 1000BASE-T Ethernet",
    994 	  WM_T_82540,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    997 	  "Intel i82540EP 1000BASE-T Ethernet",
    998 	  WM_T_82540,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1001 	  "Intel i82545EM 1000BASE-T Ethernet",
   1002 	  WM_T_82545,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1005 	  "Intel i82545GM 1000BASE-T Ethernet",
   1006 	  WM_T_82545_3,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1009 	  "Intel i82545GM 1000BASE-X Ethernet",
   1010 	  WM_T_82545_3,		WMP_F_FIBER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1013 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1014 	  WM_T_82545_3,		WMP_F_SERDES },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1017 	  "Intel i82546EB 1000BASE-T Ethernet",
   1018 	  WM_T_82546,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1021 	  "Intel i82546EB 1000BASE-T Ethernet",
   1022 	  WM_T_82546,		WMP_F_COPPER },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1025 	  "Intel i82545EM 1000BASE-X Ethernet",
   1026 	  WM_T_82545,		WMP_F_FIBER },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1029 	  "Intel i82546EB 1000BASE-X Ethernet",
   1030 	  WM_T_82546,		WMP_F_FIBER },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1033 	  "Intel i82546GB 1000BASE-T Ethernet",
   1034 	  WM_T_82546_3,		WMP_F_COPPER },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1037 	  "Intel i82546GB 1000BASE-X Ethernet",
   1038 	  WM_T_82546_3,		WMP_F_FIBER },
   1039 
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1041 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1042 	  WM_T_82546_3,		WMP_F_SERDES },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1045 	  "i82546GB quad-port Gigabit Ethernet",
   1046 	  WM_T_82546_3,		WMP_F_COPPER },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1049 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1050 	  WM_T_82546_3,		WMP_F_COPPER },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1053 	  "Intel PRO/1000MT (82546GB)",
   1054 	  WM_T_82546_3,		WMP_F_COPPER },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1057 	  "Intel i82541EI 1000BASE-T Ethernet",
   1058 	  WM_T_82541,		WMP_F_COPPER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1061 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1062 	  WM_T_82541,		WMP_F_COPPER },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1065 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1066 	  WM_T_82541,		WMP_F_COPPER },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1069 	  "Intel i82541ER 1000BASE-T Ethernet",
   1070 	  WM_T_82541_2,		WMP_F_COPPER },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1073 	  "Intel i82541GI 1000BASE-T Ethernet",
   1074 	  WM_T_82541_2,		WMP_F_COPPER },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1077 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1078 	  WM_T_82541_2,		WMP_F_COPPER },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1081 	  "Intel i82541PI 1000BASE-T Ethernet",
   1082 	  WM_T_82541_2,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1085 	  "Intel i82547EI 1000BASE-T Ethernet",
   1086 	  WM_T_82547,		WMP_F_COPPER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1089 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1090 	  WM_T_82547,		WMP_F_COPPER },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1093 	  "Intel i82547GI 1000BASE-T Ethernet",
   1094 	  WM_T_82547_2,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1097 	  "Intel PRO/1000 PT (82571EB)",
   1098 	  WM_T_82571,		WMP_F_COPPER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1101 	  "Intel PRO/1000 PF (82571EB)",
   1102 	  WM_T_82571,		WMP_F_FIBER },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1105 	  "Intel PRO/1000 PB (82571EB)",
   1106 	  WM_T_82571,		WMP_F_SERDES },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1109 	  "Intel PRO/1000 QT (82571EB)",
   1110 	  WM_T_82571,		WMP_F_COPPER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1113 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1114 	  WM_T_82571,		WMP_F_COPPER, },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1117 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1118 	  WM_T_82571,		WMP_F_COPPER, },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1121 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1122 	  WM_T_82571,		WMP_F_SERDES, },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1125 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1126 	  WM_T_82571,		WMP_F_SERDES, },
   1127 
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1129 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1130 	  WM_T_82571,		WMP_F_FIBER, },
   1131 
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1133 	  "Intel i82572EI 1000baseT Ethernet",
   1134 	  WM_T_82572,		WMP_F_COPPER },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1137 	  "Intel i82572EI 1000baseX Ethernet",
   1138 	  WM_T_82572,		WMP_F_FIBER },
   1139 
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1141 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1142 	  WM_T_82572,		WMP_F_SERDES },
   1143 
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1145 	  "Intel i82572EI 1000baseT Ethernet",
   1146 	  WM_T_82572,		WMP_F_COPPER },
   1147 
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1149 	  "Intel i82573E",
   1150 	  WM_T_82573,		WMP_F_COPPER },
   1151 
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1153 	  "Intel i82573E IAMT",
   1154 	  WM_T_82573,		WMP_F_COPPER },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1157 	  "Intel i82573L Gigabit Ethernet",
   1158 	  WM_T_82573,		WMP_F_COPPER },
   1159 
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1161 	  "Intel i82574L",
   1162 	  WM_T_82574,		WMP_F_COPPER },
   1163 
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1165 	  "Intel i82574L",
   1166 	  WM_T_82574,		WMP_F_COPPER },
   1167 
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1169 	  "Intel i82583V",
   1170 	  WM_T_82583,		WMP_F_COPPER },
   1171 
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1173 	  "i80003 dual 1000baseT Ethernet",
   1174 	  WM_T_80003,		WMP_F_COPPER },
   1175 
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1177 	  "i80003 dual 1000baseX Ethernet",
   1178 	  WM_T_80003,		WMP_F_COPPER },
   1179 
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1181 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1182 	  WM_T_80003,		WMP_F_SERDES },
   1183 
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1185 	  "Intel i80003 1000baseT Ethernet",
   1186 	  WM_T_80003,		WMP_F_COPPER },
   1187 
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1189 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1190 	  WM_T_80003,		WMP_F_SERDES },
   1191 
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1193 	  "Intel i82801H (M_AMT) LAN Controller",
   1194 	  WM_T_ICH8,		WMP_F_COPPER },
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1196 	  "Intel i82801H (AMT) LAN Controller",
   1197 	  WM_T_ICH8,		WMP_F_COPPER },
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1199 	  "Intel i82801H LAN Controller",
   1200 	  WM_T_ICH8,		WMP_F_COPPER },
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1202 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1203 	  WM_T_ICH8,		WMP_F_COPPER },
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1205 	  "Intel i82801H (M) LAN Controller",
   1206 	  WM_T_ICH8,		WMP_F_COPPER },
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1208 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1209 	  WM_T_ICH8,		WMP_F_COPPER },
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1211 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1212 	  WM_T_ICH8,		WMP_F_COPPER },
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1214 	  "82567V-3 LAN Controller",
   1215 	  WM_T_ICH8,		WMP_F_COPPER },
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1217 	  "82801I (AMT) LAN Controller",
   1218 	  WM_T_ICH9,		WMP_F_COPPER },
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1220 	  "82801I 10/100 LAN Controller",
   1221 	  WM_T_ICH9,		WMP_F_COPPER },
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1223 	  "82801I (G) 10/100 LAN Controller",
   1224 	  WM_T_ICH9,		WMP_F_COPPER },
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1226 	  "82801I (GT) 10/100 LAN Controller",
   1227 	  WM_T_ICH9,		WMP_F_COPPER },
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1229 	  "82801I (C) LAN Controller",
   1230 	  WM_T_ICH9,		WMP_F_COPPER },
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1232 	  "82801I mobile LAN Controller",
   1233 	  WM_T_ICH9,		WMP_F_COPPER },
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1235 	  "82801I mobile (V) LAN Controller",
   1236 	  WM_T_ICH9,		WMP_F_COPPER },
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1238 	  "82801I mobile (AMT) LAN Controller",
   1239 	  WM_T_ICH9,		WMP_F_COPPER },
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1241 	  "82567LM-4 LAN Controller",
   1242 	  WM_T_ICH9,		WMP_F_COPPER },
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1244 	  "82567LM-2 LAN Controller",
   1245 	  WM_T_ICH10,		WMP_F_COPPER },
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1247 	  "82567LF-2 LAN Controller",
   1248 	  WM_T_ICH10,		WMP_F_COPPER },
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1250 	  "82567LM-3 LAN Controller",
   1251 	  WM_T_ICH10,		WMP_F_COPPER },
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1253 	  "82567LF-3 LAN Controller",
   1254 	  WM_T_ICH10,		WMP_F_COPPER },
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1256 	  "82567V-2 LAN Controller",
   1257 	  WM_T_ICH10,		WMP_F_COPPER },
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1259 	  "82567V-3? LAN Controller",
   1260 	  WM_T_ICH10,		WMP_F_COPPER },
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1262 	  "HANKSVILLE LAN Controller",
   1263 	  WM_T_ICH10,		WMP_F_COPPER },
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1265 	  "PCH LAN (82577LM) Controller",
   1266 	  WM_T_PCH,		WMP_F_COPPER },
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1268 	  "PCH LAN (82577LC) Controller",
   1269 	  WM_T_PCH,		WMP_F_COPPER },
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1271 	  "PCH LAN (82578DM) Controller",
   1272 	  WM_T_PCH,		WMP_F_COPPER },
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1274 	  "PCH LAN (82578DC) Controller",
   1275 	  WM_T_PCH,		WMP_F_COPPER },
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1277 	  "PCH2 LAN (82579LM) Controller",
   1278 	  WM_T_PCH2,		WMP_F_COPPER },
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1280 	  "PCH2 LAN (82579V) Controller",
   1281 	  WM_T_PCH2,		WMP_F_COPPER },
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1283 	  "82575EB dual-1000baseT Ethernet",
   1284 	  WM_T_82575,		WMP_F_COPPER },
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1286 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1287 	  WM_T_82575,		WMP_F_SERDES },
   1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1289 	  "82575GB quad-1000baseT Ethernet",
   1290 	  WM_T_82575,		WMP_F_COPPER },
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1292 	  "82575GB quad-1000baseT Ethernet (PM)",
   1293 	  WM_T_82575,		WMP_F_COPPER },
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1295 	  "82576 1000BaseT Ethernet",
   1296 	  WM_T_82576,		WMP_F_COPPER },
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1298 	  "82576 1000BaseX Ethernet",
   1299 	  WM_T_82576,		WMP_F_FIBER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1302 	  "82576 gigabit Ethernet (SERDES)",
   1303 	  WM_T_82576,		WMP_F_SERDES },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1306 	  "82576 quad-1000BaseT Ethernet",
   1307 	  WM_T_82576,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1310 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1311 	  WM_T_82576,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1314 	  "82576 gigabit Ethernet",
   1315 	  WM_T_82576,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1318 	  "82576 gigabit Ethernet (SERDES)",
   1319 	  WM_T_82576,		WMP_F_SERDES },
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1321 	  "82576 quad-gigabit Ethernet (SERDES)",
   1322 	  WM_T_82576,		WMP_F_SERDES },
   1323 
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1325 	  "82580 1000BaseT Ethernet",
   1326 	  WM_T_82580,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1328 	  "82580 1000BaseX Ethernet",
   1329 	  WM_T_82580,		WMP_F_FIBER },
   1330 
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1332 	  "82580 1000BaseT Ethernet (SERDES)",
   1333 	  WM_T_82580,		WMP_F_SERDES },
   1334 
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1336 	  "82580 gigabit Ethernet (SGMII)",
   1337 	  WM_T_82580,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1339 	  "82580 dual-1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1343 	  "82580 quad-1000BaseX Ethernet",
   1344 	  WM_T_82580,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1347 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1348 	  WM_T_82580,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1351 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1352 	  WM_T_82580,		WMP_F_SERDES },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1355 	  "DH89XXCC 1000BASE-KX Ethernet",
   1356 	  WM_T_82580,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1359 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1360 	  WM_T_82580,		WMP_F_SERDES },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1363 	  "I350 Gigabit Network Connection",
   1364 	  WM_T_I350,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1367 	  "I350 Gigabit Fiber Network Connection",
   1368 	  WM_T_I350,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1371 	  "I350 Gigabit Backplane Connection",
   1372 	  WM_T_I350,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1375 	  "I350 Quad Port Gigabit Ethernet",
   1376 	  WM_T_I350,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1379 	  "I350 Gigabit Connection",
   1380 	  WM_T_I350,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1383 	  "I354 Gigabit Ethernet (KX)",
   1384 	  WM_T_I354,		WMP_F_SERDES },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1387 	  "I354 Gigabit Ethernet (SGMII)",
   1388 	  WM_T_I354,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1391 	  "I354 Gigabit Ethernet (2.5G)",
   1392 	  WM_T_I354,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1395 	  "I210-T1 Ethernet Server Adapter",
   1396 	  WM_T_I210,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1399 	  "I210 Ethernet (Copper OEM)",
   1400 	  WM_T_I210,		WMP_F_COPPER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1403 	  "I210 Ethernet (Copper IT)",
   1404 	  WM_T_I210,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1407 	  "I210 Ethernet (FLASH less)",
   1408 	  WM_T_I210,		WMP_F_COPPER },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1411 	  "I210 Gigabit Ethernet (Fiber)",
   1412 	  WM_T_I210,		WMP_F_FIBER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1415 	  "I210 Gigabit Ethernet (SERDES)",
   1416 	  WM_T_I210,		WMP_F_SERDES },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1419 	  "I210 Gigabit Ethernet (FLASH less)",
   1420 	  WM_T_I210,		WMP_F_SERDES },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1423 	  "I210 Gigabit Ethernet (SGMII)",
   1424 	  WM_T_I210,		WMP_F_COPPER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1427 	  "I211 Ethernet (COPPER)",
   1428 	  WM_T_I211,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1430 	  "I217 V Ethernet Connection",
   1431 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1433 	  "I217 LM Ethernet Connection",
   1434 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1436 	  "I218 V Ethernet Connection",
   1437 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1439 	  "I218 V Ethernet Connection",
   1440 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1442 	  "I218 V Ethernet Connection",
   1443 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1445 	  "I218 LM Ethernet Connection",
   1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1448 	  "I218 LM Ethernet Connection",
   1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1451 	  "I218 LM Ethernet Connection",
   1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1453 #if 0
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1455 	  "I219 V Ethernet Connection",
   1456 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1458 	  "I219 V Ethernet Connection",
   1459 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1461 	  "I219 V Ethernet Connection",
   1462 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1464 	  "I219 V Ethernet Connection",
   1465 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1467 	  "I219 LM Ethernet Connection",
   1468 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1470 	  "I219 LM Ethernet Connection",
   1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1473 	  "I219 LM Ethernet Connection",
   1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1476 	  "I219 LM Ethernet Connection",
   1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1479 	  "I219 LM Ethernet Connection",
   1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1481 #endif
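         	/* Sentinel; wm_lookup() stops at the entry with a NULL name. */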
   1482 	{ 0,			0,
   1483 	  NULL,
   1484 	  0,			0 },
   1485 };
   1486 
   1487 /*
   1488  * Register read/write functions.
   1489  * Other than CSR_{READ|WRITE}().
   1490  */
   1491 
   1492 #if 0 /* Not currently used */
   1493 static inline uint32_t
   1494 wm_io_read(struct wm_softc *sc, int reg)
   1495 {
   1496 
   1497 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1498 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1499 }
   1500 #endif
   1501 
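         /*
          * Indirect register access through the I/O BAR: write the
          * register offset to the address window at BAR offset 0, then
          * the data to the window at BAR offset 4.
          */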
   1502 static inline void
   1503 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1504 {
   1505 
   1506 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1507 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1508 }
   1509 
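         /*
          * Write an 8-bit value into one of the 82575's indirect 8-bit
          * controller registers and poll for the ready bit to be set.
          */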
   1510 static inline void
   1511 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1512     uint32_t data)
   1513 {
   1514 	uint32_t regval;
   1515 	int i;
   1516 
   1517 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1518 
   1519 	CSR_WRITE(sc, reg, regval);
   1520 
   1521 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1522 		delay(5);
   1523 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1524 			break;
   1525 	}
   1526 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1527 		aprint_error("%s: WARNING:"
   1528 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1529 		    device_xname(sc->sc_dev), reg);
   1530 	}
   1531 }
   1532 
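         /*
          * Store a bus address into the two little-endian 32-bit words of
          * a wiseman address; the high word is zero when bus addresses
          * are only 32 bits wide.
          */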
   1533 static inline void
   1534 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1535 {
   1536 	wa->wa_low = htole32(v & 0xffffffffU);
   1537 	if (sizeof(bus_addr_t) == 8)
   1538 		wa->wa_high = htole32((uint64_t) v >> 32);
   1539 	else
   1540 		wa->wa_high = 0;
   1541 }
   1542 
   1543 /*
   1544  * Descriptor sync/init functions.
   1545  */
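         /*
          * Sync TX descriptors [start, start + num), splitting the sync
          * in two when the range wraps past the end of the ring.
          */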
   1546 static inline void
   1547 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1548 {
   1549 	struct wm_softc *sc = txq->txq_sc;
   1550 
   1551 	/* If it will wrap around, sync to the end of the ring. */
   1552 	if ((start + num) > WM_NTXDESC(txq)) {
   1553 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1554 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1555 		    (WM_NTXDESC(txq) - start), ops);
   1556 		num -= (WM_NTXDESC(txq) - start);
   1557 		start = 0;
   1558 	}
   1559 
   1560 	/* Now sync whatever is left. */
   1561 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1562 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1563 }
   1564 
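         /* Sync a single RX descriptor at the given ring index. */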
   1565 static inline void
   1566 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1567 {
   1568 	struct wm_softc *sc = rxq->rxq_sc;
   1569 
   1570 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1571 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1572 }
   1573 
   1574 static inline void
   1575 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1576 {
   1577 	struct wm_softc *sc = rxq->rxq_sc;
   1578 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1579 	struct mbuf *m = rxs->rxs_mbuf;
   1580 
   1581 	/*
   1582 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1583 	 * so that the payload after the Ethernet header is aligned
   1584 	 * to a 4-byte boundary.
    1585 	 *
   1586 	 * XXX BRAINDAMAGE ALERT!
   1587 	 * The stupid chip uses the same size for every buffer, which
   1588 	 * is set in the Receive Control register.  We are using the 2K
   1589 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1590 	 * reason, we can't "scoot" packets longer than the standard
   1591 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1592 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1593 	 * the upper layer copy the headers.
   1594 	 */
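         	/*
         	 * E.g. with the standard 14-byte Ethernet header, a 2-byte
         	 * tweak leaves the following IP header 4-byte aligned.
         	 */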
   1595 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1596 
   1597 	if (sc->sc_type == WM_T_82574) {
   1598 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1599 		rxd->erx_data.erxd_addr =
   1600 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1601 		rxd->erx_data.erxd_dd = 0;
   1602 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1603 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1604 
   1605 		rxd->nqrx_data.nrxd_paddr =
   1606 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1607 		/* Currently, split header is not supported. */
   1608 		rxd->nqrx_data.nrxd_haddr = 0;
   1609 	} else {
   1610 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1611 
   1612 		wm_set_dma_addr(&rxd->wrx_addr,
   1613 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1614 		rxd->wrx_len = 0;
   1615 		rxd->wrx_cksum = 0;
   1616 		rxd->wrx_status = 0;
   1617 		rxd->wrx_errors = 0;
   1618 		rxd->wrx_special = 0;
   1619 	}
   1620 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1621 
   1622 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1623 }
   1624 
   1625 /*
   1626  * Device driver interface functions and commonly used functions.
   1627  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1628  */
   1629 
   1630 /* Lookup supported device table */
   1631 static const struct wm_product *
   1632 wm_lookup(const struct pci_attach_args *pa)
   1633 {
   1634 	const struct wm_product *wmp;
   1635 
   1636 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1637 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1638 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1639 			return wmp;
   1640 	}
   1641 	return NULL;
   1642 }
   1643 
   1644 /* The match function (ca_match) */
   1645 static int
   1646 wm_match(device_t parent, cfdata_t cf, void *aux)
   1647 {
   1648 	struct pci_attach_args *pa = aux;
   1649 
   1650 	if (wm_lookup(pa) != NULL)
   1651 		return 1;
   1652 
   1653 	return 0;
   1654 }
   1655 
   1656 /* The attach function (ca_attach) */
   1657 static void
   1658 wm_attach(device_t parent, device_t self, void *aux)
   1659 {
   1660 	struct wm_softc *sc = device_private(self);
   1661 	struct pci_attach_args *pa = aux;
   1662 	prop_dictionary_t dict;
   1663 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1664 	pci_chipset_tag_t pc = pa->pa_pc;
   1665 	int counts[PCI_INTR_TYPE_SIZE];
   1666 	pci_intr_type_t max_type;
   1667 	const char *eetype, *xname;
   1668 	bus_space_tag_t memt;
   1669 	bus_space_handle_t memh;
   1670 	bus_size_t memsize;
   1671 	int memh_valid;
   1672 	int i, error;
   1673 	const struct wm_product *wmp;
   1674 	prop_data_t ea;
   1675 	prop_number_t pn;
   1676 	uint8_t enaddr[ETHER_ADDR_LEN];
   1677 	char buf[256];
   1678 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1679 	pcireg_t preg, memtype;
   1680 	uint16_t eeprom_data, apme_mask;
   1681 	bool force_clear_smbi;
   1682 	uint32_t link_mode;
   1683 	uint32_t reg;
   1684 
   1685 	sc->sc_dev = self;
   1686 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1687 	sc->sc_core_stopping = false;
   1688 
   1689 	wmp = wm_lookup(pa);
   1690 #ifdef DIAGNOSTIC
   1691 	if (wmp == NULL) {
   1692 		printf("\n");
   1693 		panic("wm_attach: impossible");
   1694 	}
   1695 #endif
   1696 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1697 
   1698 	sc->sc_pc = pa->pa_pc;
   1699 	sc->sc_pcitag = pa->pa_tag;
   1700 
   1701 	if (pci_dma64_available(pa))
   1702 		sc->sc_dmat = pa->pa_dmat64;
   1703 	else
   1704 		sc->sc_dmat = pa->pa_dmat;
   1705 
   1706 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1707 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1708 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1709 
   1710 	sc->sc_type = wmp->wmp_type;
   1711 
   1712 	/* Set default function pointers */
   1713 	sc->phy.acquire = wm_get_null;
   1714 	sc->phy.release = wm_put_null;
   1715 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1716 
   1717 	if (sc->sc_type < WM_T_82543) {
   1718 		if (sc->sc_rev < 2) {
   1719 			aprint_error_dev(sc->sc_dev,
   1720 			    "i82542 must be at least rev. 2\n");
   1721 			return;
   1722 		}
   1723 		if (sc->sc_rev < 3)
   1724 			sc->sc_type = WM_T_82542_2_0;
   1725 	}
   1726 
   1727 	/*
   1728 	 * Disable MSI for Errata:
   1729 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1730 	 *
   1731 	 *  82544: Errata 25
   1732 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1733 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1734 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1735 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1736 	 *
   1737 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1738 	 *
   1739 	 *  82571 & 82572: Errata 63
   1740 	 */
   1741 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1742 	    || (sc->sc_type == WM_T_82572))
   1743 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1744 
   1745 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1746 	    || (sc->sc_type == WM_T_82580)
   1747 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1748 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1749 		sc->sc_flags |= WM_F_NEWQUEUE;
   1750 
   1751 	/* Set device properties (mactype) */
   1752 	dict = device_properties(sc->sc_dev);
   1753 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1754 
   1755 	/*
    1756 	 * Map the device.  All devices support memory-mapped access,
   1757 	 * and it is really required for normal operation.
   1758 	 */
   1759 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1760 	switch (memtype) {
   1761 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1762 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1763 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1764 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1765 		break;
   1766 	default:
   1767 		memh_valid = 0;
   1768 		break;
   1769 	}
   1770 
   1771 	if (memh_valid) {
   1772 		sc->sc_st = memt;
   1773 		sc->sc_sh = memh;
   1774 		sc->sc_ss = memsize;
   1775 	} else {
   1776 		aprint_error_dev(sc->sc_dev,
   1777 		    "unable to map device registers\n");
   1778 		return;
   1779 	}
   1780 
   1781 	/*
   1782 	 * In addition, i82544 and later support I/O mapped indirect
   1783 	 * register access.  It is not desirable (nor supported in
   1784 	 * this driver) to use it for normal operation, though it is
   1785 	 * required to work around bugs in some chip versions.
   1786 	 */
   1787 	if (sc->sc_type >= WM_T_82544) {
   1788 		/* First we have to find the I/O BAR. */
   1789 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1790 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1791 			if (memtype == PCI_MAPREG_TYPE_IO)
   1792 				break;
   1793 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1794 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1795 				i += 4;	/* skip high bits, too */
   1796 		}
   1797 		if (i < PCI_MAPREG_END) {
   1798 			/*
    1799 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1800 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1801 			 * That's no problem, because those newer chips don't
    1802 			 * have this bug.
    1803 			 *
    1804 			 * The i8254x apparently doesn't respond when the
    1805 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1806 			 * been configured.
   1807 			 */
   1808 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1809 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1810 				aprint_error_dev(sc->sc_dev,
   1811 				    "WARNING: I/O BAR at zero.\n");
   1812 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1813 					0, &sc->sc_iot, &sc->sc_ioh,
   1814 					NULL, &sc->sc_ios) == 0) {
   1815 				sc->sc_flags |= WM_F_IOH_VALID;
   1816 			} else {
   1817 				aprint_error_dev(sc->sc_dev,
   1818 				    "WARNING: unable to map I/O space\n");
   1819 			}
   1820 		}
   1821 
   1822 	}
   1823 
   1824 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1825 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1826 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1827 	if (sc->sc_type < WM_T_82542_2_1)
   1828 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1829 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1830 
   1831 	/* power up chip */
   1832 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1833 	    NULL)) && error != EOPNOTSUPP) {
   1834 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1835 		return;
   1836 	}
   1837 
   1838 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1839 
   1840 	/* Allocation settings */
   1841 	max_type = PCI_INTR_TYPE_MSIX;
   1842 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1843 	counts[PCI_INTR_TYPE_MSI] = 1;
   1844 	counts[PCI_INTR_TYPE_INTX] = 1;
   1845 	/* overridden by disable flags */
   1846 	if (wm_disable_msi != 0) {
   1847 		counts[PCI_INTR_TYPE_MSI] = 0;
   1848 		if (wm_disable_msix != 0) {
   1849 			max_type = PCI_INTR_TYPE_INTX;
   1850 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1851 		}
   1852 	} else if (wm_disable_msix != 0) {
   1853 		max_type = PCI_INTR_TYPE_MSI;
   1854 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1855 	}
   1856 
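         	/*
         	 * Try MSI-X first.  If MSI-X setup fails, release the vectors
         	 * and retry with MSI; if MSI setup fails, fall back to INTx.
         	 */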
   1857 alloc_retry:
   1858 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1859 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1860 		return;
   1861 	}
   1862 
   1863 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1864 		error = wm_setup_msix(sc);
   1865 		if (error) {
   1866 			pci_intr_release(pc, sc->sc_intrs,
   1867 			    counts[PCI_INTR_TYPE_MSIX]);
   1868 
   1869 			/* Setup for MSI: Disable MSI-X */
   1870 			max_type = PCI_INTR_TYPE_MSI;
   1871 			counts[PCI_INTR_TYPE_MSI] = 1;
   1872 			counts[PCI_INTR_TYPE_INTX] = 1;
   1873 			goto alloc_retry;
   1874 		}
    1875 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1876 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1877 		error = wm_setup_legacy(sc);
   1878 		if (error) {
   1879 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1880 			    counts[PCI_INTR_TYPE_MSI]);
   1881 
   1882 			/* The next try is for INTx: Disable MSI */
   1883 			max_type = PCI_INTR_TYPE_INTX;
   1884 			counts[PCI_INTR_TYPE_INTX] = 1;
   1885 			goto alloc_retry;
   1886 		}
   1887 	} else {
   1888 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1889 		error = wm_setup_legacy(sc);
   1890 		if (error) {
   1891 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1892 			    counts[PCI_INTR_TYPE_INTX]);
   1893 			return;
   1894 		}
   1895 	}
   1896 
   1897 	/*
   1898 	 * Check the function ID (unit number of the chip).
   1899 	 */
   1900 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1901 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1902 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1903 	    || (sc->sc_type == WM_T_82580)
   1904 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1905 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1906 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1907 	else
   1908 		sc->sc_funcid = 0;
   1909 
   1910 	/*
   1911 	 * Determine a few things about the bus we're connected to.
   1912 	 */
   1913 	if (sc->sc_type < WM_T_82543) {
   1914 		/* We don't really know the bus characteristics here. */
   1915 		sc->sc_bus_speed = 33;
   1916 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1917 		/*
    1918 		 * CSA (Communication Streaming Architecture) is about as fast
    1919 		 * as a 32-bit 66MHz PCI bus.
   1920 		 */
   1921 		sc->sc_flags |= WM_F_CSA;
   1922 		sc->sc_bus_speed = 66;
   1923 		aprint_verbose_dev(sc->sc_dev,
   1924 		    "Communication Streaming Architecture\n");
   1925 		if (sc->sc_type == WM_T_82547) {
   1926 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1927 			callout_setfunc(&sc->sc_txfifo_ch,
   1928 					wm_82547_txfifo_stall, sc);
   1929 			aprint_verbose_dev(sc->sc_dev,
   1930 			    "using 82547 Tx FIFO stall work-around\n");
   1931 		}
   1932 	} else if (sc->sc_type >= WM_T_82571) {
   1933 		sc->sc_flags |= WM_F_PCIE;
   1934 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1935 		    && (sc->sc_type != WM_T_ICH10)
   1936 		    && (sc->sc_type != WM_T_PCH)
   1937 		    && (sc->sc_type != WM_T_PCH2)
   1938 		    && (sc->sc_type != WM_T_PCH_LPT)
   1939 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1940 			/* ICH* and PCH* have no PCIe capability registers */
   1941 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1942 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1943 				NULL) == 0)
   1944 				aprint_error_dev(sc->sc_dev,
   1945 				    "unable to find PCIe capability\n");
   1946 		}
   1947 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1948 	} else {
   1949 		reg = CSR_READ(sc, WMREG_STATUS);
   1950 		if (reg & STATUS_BUS64)
   1951 			sc->sc_flags |= WM_F_BUS64;
   1952 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1953 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1954 
   1955 			sc->sc_flags |= WM_F_PCIX;
   1956 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1957 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIX capability\n");
   1960 			else if (sc->sc_type != WM_T_82545_3 &&
   1961 				 sc->sc_type != WM_T_82546_3) {
   1962 				/*
   1963 				 * Work around a problem caused by the BIOS
   1964 				 * setting the max memory read byte count
   1965 				 * incorrectly.
   1966 				 */
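         				/*
         				 * E.g. an MMRBC of 4096 bytes
         				 * (bytecnt 3) is clamped to a MAXB of
         				 * 2048 bytes (maxb 2), i.e. 512 << 2.
         				 */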
   1967 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1968 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1969 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1970 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1971 
   1972 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1973 				    PCIX_CMD_BYTECNT_SHIFT;
   1974 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1975 				    PCIX_STATUS_MAXB_SHIFT;
   1976 				if (bytecnt > maxb) {
   1977 					aprint_verbose_dev(sc->sc_dev,
   1978 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1979 					    512 << bytecnt, 512 << maxb);
   1980 					pcix_cmd = (pcix_cmd &
   1981 					    ~PCIX_CMD_BYTECNT_MASK) |
   1982 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1983 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1984 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1985 					    pcix_cmd);
   1986 				}
   1987 			}
   1988 		}
   1989 		/*
   1990 		 * The quad port adapter is special; it has a PCIX-PCIX
   1991 		 * bridge on the board, and can run the secondary bus at
   1992 		 * a higher speed.
   1993 		 */
   1994 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1995 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1996 								      : 66;
   1997 		} else if (sc->sc_flags & WM_F_PCIX) {
   1998 			switch (reg & STATUS_PCIXSPD_MASK) {
   1999 			case STATUS_PCIXSPD_50_66:
   2000 				sc->sc_bus_speed = 66;
   2001 				break;
   2002 			case STATUS_PCIXSPD_66_100:
   2003 				sc->sc_bus_speed = 100;
   2004 				break;
   2005 			case STATUS_PCIXSPD_100_133:
   2006 				sc->sc_bus_speed = 133;
   2007 				break;
   2008 			default:
   2009 				aprint_error_dev(sc->sc_dev,
   2010 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2011 				    reg & STATUS_PCIXSPD_MASK);
   2012 				sc->sc_bus_speed = 66;
   2013 				break;
   2014 			}
   2015 		} else
   2016 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2017 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2018 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2019 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2020 	}
   2021 
   2022 	/* clear interesting stat counters */
   2023 	CSR_READ(sc, WMREG_COLC);
   2024 	CSR_READ(sc, WMREG_RXERRC);
   2025 
   2026 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2027 	    || (sc->sc_type >= WM_T_ICH8))
   2028 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2029 	if (sc->sc_type >= WM_T_ICH8)
   2030 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2031 
    2032 	/* Set NVM parameters and PHY/NVM semaphore functions */
   2033 	switch (sc->sc_type) {
   2034 	case WM_T_82542_2_0:
   2035 	case WM_T_82542_2_1:
   2036 	case WM_T_82543:
   2037 	case WM_T_82544:
   2038 		/* Microwire */
   2039 		sc->sc_nvm_wordsize = 64;
   2040 		sc->sc_nvm_addrbits = 6;
   2041 		break;
   2042 	case WM_T_82540:
   2043 	case WM_T_82545:
   2044 	case WM_T_82545_3:
   2045 	case WM_T_82546:
   2046 	case WM_T_82546_3:
   2047 		/* Microwire */
   2048 		reg = CSR_READ(sc, WMREG_EECD);
   2049 		if (reg & EECD_EE_SIZE) {
   2050 			sc->sc_nvm_wordsize = 256;
   2051 			sc->sc_nvm_addrbits = 8;
   2052 		} else {
   2053 			sc->sc_nvm_wordsize = 64;
   2054 			sc->sc_nvm_addrbits = 6;
   2055 		}
   2056 		sc->sc_flags |= WM_F_LOCK_EECD;
   2057 		break;
   2058 	case WM_T_82541:
   2059 	case WM_T_82541_2:
   2060 	case WM_T_82547:
   2061 	case WM_T_82547_2:
   2062 		sc->sc_flags |= WM_F_LOCK_EECD;
   2063 		reg = CSR_READ(sc, WMREG_EECD);
   2064 		if (reg & EECD_EE_TYPE) {
   2065 			/* SPI */
   2066 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2067 			wm_nvm_set_addrbits_size_eecd(sc);
   2068 		} else {
   2069 			/* Microwire */
   2070 			if ((reg & EECD_EE_ABITS) != 0) {
   2071 				sc->sc_nvm_wordsize = 256;
   2072 				sc->sc_nvm_addrbits = 8;
   2073 			} else {
   2074 				sc->sc_nvm_wordsize = 64;
   2075 				sc->sc_nvm_addrbits = 6;
   2076 			}
   2077 		}
   2078 		break;
   2079 	case WM_T_82571:
   2080 	case WM_T_82572:
   2081 		/* SPI */
   2082 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2083 		wm_nvm_set_addrbits_size_eecd(sc);
   2084 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2085 		sc->phy.acquire = wm_get_swsm_semaphore;
   2086 		sc->phy.release = wm_put_swsm_semaphore;
   2087 		break;
   2088 	case WM_T_82573:
   2089 	case WM_T_82574:
   2090 	case WM_T_82583:
   2091 		if (sc->sc_type == WM_T_82573) {
   2092 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2093 			sc->phy.acquire = wm_get_swsm_semaphore;
   2094 			sc->phy.release = wm_put_swsm_semaphore;
   2095 		} else {
   2096 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2097 			/* Both PHY and NVM use the same semaphore. */
   2098 			sc->phy.acquire
   2099 			    = wm_get_swfwhw_semaphore;
   2100 			sc->phy.release
   2101 			    = wm_put_swfwhw_semaphore;
   2102 		}
   2103 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2104 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2105 			sc->sc_nvm_wordsize = 2048;
   2106 		} else {
   2107 			/* SPI */
   2108 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2109 			wm_nvm_set_addrbits_size_eecd(sc);
   2110 		}
   2111 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2112 		break;
   2113 	case WM_T_82575:
   2114 	case WM_T_82576:
   2115 	case WM_T_82580:
   2116 	case WM_T_I350:
   2117 	case WM_T_I354:
   2118 	case WM_T_80003:
   2119 		/* SPI */
   2120 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2121 		wm_nvm_set_addrbits_size_eecd(sc);
   2122 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2123 		    | WM_F_LOCK_SWSM;
   2124 		sc->phy.acquire = wm_get_phy_82575;
   2125 		sc->phy.release = wm_put_phy_82575;
   2126 		break;
   2127 	case WM_T_ICH8:
   2128 	case WM_T_ICH9:
   2129 	case WM_T_ICH10:
   2130 	case WM_T_PCH:
   2131 	case WM_T_PCH2:
   2132 	case WM_T_PCH_LPT:
   2133 		/* FLASH */
   2134 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2135 		sc->sc_nvm_wordsize = 2048;
   2136 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2137 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2138 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2139 			aprint_error_dev(sc->sc_dev,
   2140 			    "can't map FLASH registers\n");
   2141 			goto out;
   2142 		}
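         		/*
         		 * Derive the flash base offset and the per-bank size in
         		 * words from GFPREG, which holds the first and last
         		 * sectors of the GbE flash region; two banks are assumed.
         		 */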
   2143 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2144 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2145 		    ICH_FLASH_SECTOR_SIZE;
   2146 		sc->sc_ich8_flash_bank_size =
   2147 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2148 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2149 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2150 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2151 		sc->sc_flashreg_offset = 0;
   2152 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2153 		sc->phy.release = wm_put_swflag_ich8lan;
   2154 		break;
   2155 	case WM_T_PCH_SPT:
   2156 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2157 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2158 		sc->sc_flasht = sc->sc_st;
   2159 		sc->sc_flashh = sc->sc_sh;
   2160 		sc->sc_ich8_flash_base = 0;
   2161 		sc->sc_nvm_wordsize =
   2162 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2163 			* NVM_SIZE_MULTIPLIER;
    2164 		/* The size is in bytes; convert it to words. */
   2165 		sc->sc_nvm_wordsize /= 2;
   2166 		/* assume 2 banks */
   2167 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2168 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2169 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2170 		sc->phy.release = wm_put_swflag_ich8lan;
   2171 		break;
   2172 	case WM_T_I210:
   2173 	case WM_T_I211:
   2174 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2175 			wm_nvm_set_addrbits_size_eecd(sc);
   2176 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2177 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2178 		} else {
   2179 			sc->sc_nvm_wordsize = INVM_SIZE;
   2180 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2181 		}
   2182 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2183 		sc->phy.acquire = wm_get_phy_82575;
   2184 		sc->phy.release = wm_put_phy_82575;
   2185 		break;
   2186 	default:
   2187 		break;
   2188 	}
   2189 
   2190 	/* Reset the chip to a known state. */
   2191 	wm_reset(sc);
   2192 
   2193 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2194 	switch (sc->sc_type) {
   2195 	case WM_T_82571:
   2196 	case WM_T_82572:
   2197 		reg = CSR_READ(sc, WMREG_SWSM2);
   2198 		if ((reg & SWSM2_LOCK) == 0) {
   2199 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2200 			force_clear_smbi = true;
   2201 		} else
   2202 			force_clear_smbi = false;
   2203 		break;
   2204 	case WM_T_82573:
   2205 	case WM_T_82574:
   2206 	case WM_T_82583:
   2207 		force_clear_smbi = true;
   2208 		break;
   2209 	default:
   2210 		force_clear_smbi = false;
   2211 		break;
   2212 	}
   2213 	if (force_clear_smbi) {
   2214 		reg = CSR_READ(sc, WMREG_SWSM);
   2215 		if ((reg & SWSM_SMBI) != 0)
   2216 			aprint_error_dev(sc->sc_dev,
   2217 			    "Please update the Bootagent\n");
   2218 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2219 	}
   2220 
   2221 	/*
    2222 	 * Defer printing the EEPROM type until after verifying the checksum.
   2223 	 * This allows the EEPROM type to be printed correctly in the case
   2224 	 * that no EEPROM is attached.
   2225 	 */
   2226 	/*
   2227 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2228 	 * this for later, so we can fail future reads from the EEPROM.
   2229 	 */
   2230 	if (wm_nvm_validate_checksum(sc)) {
   2231 		/*
    2232 		 * Check a second time, because some PCI-e parts fail the
    2233 		 * first check due to the link being in a sleep state.
   2234 		 */
   2235 		if (wm_nvm_validate_checksum(sc))
   2236 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2237 	}
   2238 
   2239 	/* Set device properties (macflags) */
   2240 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2241 
   2242 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2243 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2244 	else {
   2245 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2246 		    sc->sc_nvm_wordsize);
   2247 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2248 			aprint_verbose("iNVM");
   2249 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2250 			aprint_verbose("FLASH(HW)");
   2251 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2252 			aprint_verbose("FLASH");
   2253 		else {
   2254 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2255 				eetype = "SPI";
   2256 			else
   2257 				eetype = "MicroWire";
   2258 			aprint_verbose("(%d address bits) %s EEPROM",
   2259 			    sc->sc_nvm_addrbits, eetype);
   2260 		}
   2261 	}
   2262 	wm_nvm_version(sc);
   2263 	aprint_verbose("\n");
   2264 
   2265 	/* Check for I21[01] PLL workaround */
   2266 	if (sc->sc_type == WM_T_I210)
   2267 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2268 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2269 		/* NVM image release 3.25 has a workaround */
   2270 		if ((sc->sc_nvm_ver_major < 3)
   2271 		    || ((sc->sc_nvm_ver_major == 3)
   2272 			&& (sc->sc_nvm_ver_minor < 25))) {
   2273 			aprint_verbose_dev(sc->sc_dev,
   2274 			    "ROM image version %d.%d is older than 3.25\n",
   2275 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2276 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2277 		}
   2278 	}
   2279 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2280 		wm_pll_workaround_i210(sc);
   2281 
   2282 	wm_get_wakeup(sc);
   2283 
   2284 	/* Non-AMT based hardware can now take control from firmware */
   2285 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2286 		wm_get_hw_control(sc);
   2287 
   2288 	/*
   2289 	 * Read the Ethernet address from the EEPROM, if not first found
   2290 	 * in device properties.
   2291 	 */
   2292 	ea = prop_dictionary_get(dict, "mac-address");
   2293 	if (ea != NULL) {
   2294 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2295 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2296 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2297 	} else {
   2298 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2299 			aprint_error_dev(sc->sc_dev,
   2300 			    "unable to read Ethernet address\n");
   2301 			goto out;
   2302 		}
   2303 	}
   2304 
   2305 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2306 	    ether_sprintf(enaddr));
   2307 
   2308 	/*
   2309 	 * Read the config info from the EEPROM, and set up various
   2310 	 * bits in the control registers based on their contents.
   2311 	 */
   2312 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2313 	if (pn != NULL) {
   2314 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2315 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2316 	} else {
   2317 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2318 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2319 			goto out;
   2320 		}
   2321 	}
   2322 
   2323 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2324 	if (pn != NULL) {
   2325 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2326 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2327 	} else {
   2328 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2329 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2330 			goto out;
   2331 		}
   2332 	}
   2333 
   2334 	/* check for WM_F_WOL */
   2335 	switch (sc->sc_type) {
   2336 	case WM_T_82542_2_0:
   2337 	case WM_T_82542_2_1:
   2338 	case WM_T_82543:
   2339 		/* dummy? */
   2340 		eeprom_data = 0;
   2341 		apme_mask = NVM_CFG3_APME;
   2342 		break;
   2343 	case WM_T_82544:
   2344 		apme_mask = NVM_CFG2_82544_APM_EN;
   2345 		eeprom_data = cfg2;
   2346 		break;
   2347 	case WM_T_82546:
   2348 	case WM_T_82546_3:
   2349 	case WM_T_82571:
   2350 	case WM_T_82572:
   2351 	case WM_T_82573:
   2352 	case WM_T_82574:
   2353 	case WM_T_82583:
   2354 	case WM_T_80003:
   2355 	default:
   2356 		apme_mask = NVM_CFG3_APME;
   2357 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2358 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2359 		break;
   2360 	case WM_T_82575:
   2361 	case WM_T_82576:
   2362 	case WM_T_82580:
   2363 	case WM_T_I350:
   2364 	case WM_T_I354: /* XXX ok? */
   2365 	case WM_T_ICH8:
   2366 	case WM_T_ICH9:
   2367 	case WM_T_ICH10:
   2368 	case WM_T_PCH:
   2369 	case WM_T_PCH2:
   2370 	case WM_T_PCH_LPT:
   2371 	case WM_T_PCH_SPT:
   2372 		/* XXX The funcid should be checked on some devices */
   2373 		apme_mask = WUC_APME;
   2374 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2375 		break;
   2376 	}
   2377 
   2378 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2379 	if ((eeprom_data & apme_mask) != 0)
   2380 		sc->sc_flags |= WM_F_WOL;
   2381 
   2382 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2383 		/* Check NVM for autonegotiation */
   2384 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2385 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2386 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2387 		}
   2388 	}
   2389 
   2390 	/*
    2391 	 * XXX need special handling for some multiple-port cards
    2392 	 * to disable a particular port.
   2393 	 */
   2394 
   2395 	if (sc->sc_type >= WM_T_82544) {
   2396 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2397 		if (pn != NULL) {
   2398 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2399 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2400 		} else {
   2401 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2402 				aprint_error_dev(sc->sc_dev,
   2403 				    "unable to read SWDPIN\n");
   2404 				goto out;
   2405 			}
   2406 		}
   2407 	}
   2408 
   2409 	if (cfg1 & NVM_CFG1_ILOS)
   2410 		sc->sc_ctrl |= CTRL_ILOS;
   2411 
   2412 	/*
   2413 	 * XXX
    2414 	 * This code isn't correct because pins 2 and 3 are located
    2415 	 * at different positions on newer chips. Check all datasheets.
    2416 	 *
    2417 	 * Until this is resolved, apply it only to chips up to the 82580.
   2418 	 */
   2419 	if (sc->sc_type <= WM_T_82580) {
   2420 		if (sc->sc_type >= WM_T_82544) {
   2421 			sc->sc_ctrl |=
   2422 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2423 			    CTRL_SWDPIO_SHIFT;
   2424 			sc->sc_ctrl |=
   2425 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2426 			    CTRL_SWDPINS_SHIFT;
   2427 		} else {
   2428 			sc->sc_ctrl |=
   2429 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2430 			    CTRL_SWDPIO_SHIFT;
   2431 		}
   2432 	}
   2433 
   2434 	/* XXX For other than 82580? */
   2435 	if (sc->sc_type == WM_T_82580) {
   2436 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2437 		if (nvmword & __BIT(13))
   2438 			sc->sc_ctrl |= CTRL_ILOS;
   2439 	}
   2440 
   2441 #if 0
   2442 	if (sc->sc_type >= WM_T_82544) {
   2443 		if (cfg1 & NVM_CFG1_IPS0)
   2444 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2445 		if (cfg1 & NVM_CFG1_IPS1)
   2446 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2447 		sc->sc_ctrl_ext |=
   2448 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2449 		    CTRL_EXT_SWDPIO_SHIFT;
   2450 		sc->sc_ctrl_ext |=
   2451 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2452 		    CTRL_EXT_SWDPINS_SHIFT;
   2453 	} else {
   2454 		sc->sc_ctrl_ext |=
   2455 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2456 		    CTRL_EXT_SWDPIO_SHIFT;
   2457 	}
   2458 #endif
   2459 
   2460 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2461 #if 0
   2462 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2463 #endif
   2464 
   2465 	if (sc->sc_type == WM_T_PCH) {
   2466 		uint16_t val;
   2467 
   2468 		/* Save the NVM K1 bit setting */
   2469 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2470 
   2471 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2472 			sc->sc_nvm_k1_enabled = 1;
   2473 		else
   2474 			sc->sc_nvm_k1_enabled = 0;
   2475 	}
   2476 
   2477 	/*
    2478 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2479 	 * media structures accordingly.
   2480 	 */
   2481 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2482 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2483 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2484 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2485 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2486 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2487 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2488 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2489 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2490 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2491 	    || (sc->sc_type == WM_T_I211)) {
   2492 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2493 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2494 		switch (link_mode) {
   2495 		case CTRL_EXT_LINK_MODE_1000KX:
   2496 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2497 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2498 			break;
   2499 		case CTRL_EXT_LINK_MODE_SGMII:
   2500 			if (wm_sgmii_uses_mdio(sc)) {
   2501 				aprint_verbose_dev(sc->sc_dev,
   2502 				    "SGMII(MDIO)\n");
   2503 				sc->sc_flags |= WM_F_SGMII;
   2504 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2505 				break;
   2506 			}
   2507 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2508 			/*FALLTHROUGH*/
   2509 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2510 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2511 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2512 				if (link_mode
   2513 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2514 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2515 					sc->sc_flags |= WM_F_SGMII;
   2516 				} else {
   2517 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2518 					aprint_verbose_dev(sc->sc_dev,
   2519 					    "SERDES\n");
   2520 				}
   2521 				break;
   2522 			}
   2523 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2524 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2525 
   2526 			/* Change current link mode setting */
   2527 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2528 			switch (sc->sc_mediatype) {
   2529 			case WM_MEDIATYPE_COPPER:
   2530 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2531 				break;
   2532 			case WM_MEDIATYPE_SERDES:
   2533 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2534 				break;
   2535 			default:
   2536 				break;
   2537 			}
   2538 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2539 			break;
   2540 		case CTRL_EXT_LINK_MODE_GMII:
   2541 		default:
   2542 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2543 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2544 			break;
   2545 		}
   2546 
   2548 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2549 			reg |= CTRL_EXT_I2C_ENA;
   2550 		else
   2551 			reg &= ~CTRL_EXT_I2C_ENA;
   2552 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2553 
   2554 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2555 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2556 		else
   2557 			wm_tbi_mediainit(sc);
   2558 	} else if (sc->sc_type < WM_T_82543 ||
   2559 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2560 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2561 			aprint_error_dev(sc->sc_dev,
   2562 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2563 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2564 		}
   2565 		wm_tbi_mediainit(sc);
   2566 	} else {
   2567 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2568 			aprint_error_dev(sc->sc_dev,
   2569 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2570 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2571 		}
   2572 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2573 	}
   2574 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2575 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2576 
   2577 	ifp = &sc->sc_ethercom.ec_if;
   2578 	xname = device_xname(sc->sc_dev);
   2579 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2580 	ifp->if_softc = sc;
   2581 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2582 #ifdef WM_MPSAFE
   2583 	ifp->if_extflags = IFEF_START_MPSAFE;
   2584 #endif
   2585 	ifp->if_ioctl = wm_ioctl;
   2586 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2587 		ifp->if_start = wm_nq_start;
   2588 		/*
   2589 		 * When the number of CPUs is one and the controller can use
    2590 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2591 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2592 		 * the other for link status changes.
   2593 		 * In this situation, wm_nq_transmit() is disadvantageous
   2594 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2595 		 */
   2596 		if (wm_is_using_multiqueue(sc))
   2597 			ifp->if_transmit = wm_nq_transmit;
   2598 	} else {
   2599 		ifp->if_start = wm_start;
   2600 		/*
    2601 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2602 		 */
   2603 		if (wm_is_using_multiqueue(sc))
   2604 			ifp->if_transmit = wm_transmit;
   2605 	}
   2606 	ifp->if_watchdog = wm_watchdog;
   2607 	ifp->if_init = wm_init;
   2608 	ifp->if_stop = wm_stop;
   2609 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2610 	IFQ_SET_READY(&ifp->if_snd);
   2611 
   2612 	/* Check for jumbo frame */
   2613 	switch (sc->sc_type) {
   2614 	case WM_T_82573:
   2615 		/* XXX limited to 9234 if ASPM is disabled */
   2616 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2617 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2618 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2619 		break;
   2620 	case WM_T_82571:
   2621 	case WM_T_82572:
   2622 	case WM_T_82574:
   2623 	case WM_T_82575:
   2624 	case WM_T_82576:
   2625 	case WM_T_82580:
   2626 	case WM_T_I350:
   2627 	case WM_T_I354: /* XXXX ok? */
   2628 	case WM_T_I210:
   2629 	case WM_T_I211:
   2630 	case WM_T_80003:
   2631 	case WM_T_ICH9:
   2632 	case WM_T_ICH10:
   2633 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2634 	case WM_T_PCH_LPT:
   2635 	case WM_T_PCH_SPT:
   2636 		/* XXX limited to 9234 */
   2637 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2638 		break;
   2639 	case WM_T_PCH:
   2640 		/* XXX limited to 4096 */
   2641 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2642 		break;
   2643 	case WM_T_82542_2_0:
   2644 	case WM_T_82542_2_1:
   2645 	case WM_T_82583:
   2646 	case WM_T_ICH8:
   2647 		/* No support for jumbo frame */
   2648 		break;
   2649 	default:
   2650 		/* ETHER_MAX_LEN_JUMBO */
   2651 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2652 		break;
   2653 	}
   2654 
    2655 	/* If we're an i82543 or greater, we can support VLANs. */
   2656 	if (sc->sc_type >= WM_T_82543)
   2657 		sc->sc_ethercom.ec_capabilities |=
   2658 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2659 
   2660 	/*
    2661 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2662 	 * on i82543 and later.
   2663 	 */
   2664 	if (sc->sc_type >= WM_T_82543) {
   2665 		ifp->if_capabilities |=
   2666 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2667 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2668 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2669 		    IFCAP_CSUM_TCPv6_Tx |
   2670 		    IFCAP_CSUM_UDPv6_Tx;
   2671 	}
   2672 
   2673 	/*
   2674 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2675 	 *
   2676 	 *	82541GI (8086:1076) ... no
   2677 	 *	82572EI (8086:10b9) ... yes
   2678 	 */
   2679 	if (sc->sc_type >= WM_T_82571) {
   2680 		ifp->if_capabilities |=
   2681 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2682 	}
   2683 
    2684 	 * If we're an i82544 or greater (except i82547), we can do
   2685 	 * If we're a i82544 or greater (except i82547), we can do
   2686 	 * TCP segmentation offload.
   2687 	 */
   2688 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2689 		ifp->if_capabilities |= IFCAP_TSOv4;
   2690 	}
   2691 
   2692 	if (sc->sc_type >= WM_T_82571) {
   2693 		ifp->if_capabilities |= IFCAP_TSOv6;
   2694 	}
   2695 
   2696 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2697 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2698 
   2699 #ifdef WM_MPSAFE
   2700 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2701 #else
   2702 	sc->sc_core_lock = NULL;
   2703 #endif
   2704 
   2705 	/* Attach the interface. */
   2706 	if_initialize(ifp);
   2707 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2708 	ether_ifattach(ifp, enaddr);
   2709 	if_register(ifp);
   2710 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2711 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2712 			  RND_FLAG_DEFAULT);
   2713 
   2714 #ifdef WM_EVENT_COUNTERS
   2715 	/* Attach event counters. */
   2716 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2717 	    NULL, xname, "linkintr");
   2718 
   2719 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2720 	    NULL, xname, "tx_xoff");
   2721 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2722 	    NULL, xname, "tx_xon");
   2723 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2724 	    NULL, xname, "rx_xoff");
   2725 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2726 	    NULL, xname, "rx_xon");
   2727 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2728 	    NULL, xname, "rx_macctl");
   2729 #endif /* WM_EVENT_COUNTERS */
   2730 
   2731 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2732 		pmf_class_network_register(self, ifp);
   2733 	else
   2734 		aprint_error_dev(self, "couldn't establish power handler\n");
   2735 
   2736 	sc->sc_flags |= WM_F_ATTACHED;
   2737  out:
   2738 	return;
   2739 }
   2740 
   2741 /* The detach function (ca_detach) */
   2742 static int
   2743 wm_detach(device_t self, int flags __unused)
   2744 {
   2745 	struct wm_softc *sc = device_private(self);
   2746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2747 	int i;
   2748 
   2749 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2750 		return 0;
   2751 
   2752 	/* Stop the interface. Callouts are stopped in it. */
   2753 	wm_stop(ifp, 1);
   2754 
   2755 	pmf_device_deregister(self);
   2756 
   2757 #ifdef WM_EVENT_COUNTERS
   2758 	evcnt_detach(&sc->sc_ev_linkintr);
   2759 
   2760 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2761 	evcnt_detach(&sc->sc_ev_tx_xon);
   2762 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2763 	evcnt_detach(&sc->sc_ev_rx_xon);
   2764 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2765 #endif /* WM_EVENT_COUNTERS */
   2766 
   2767 	/* Tell the firmware about the release */
   2768 	WM_CORE_LOCK(sc);
   2769 	wm_release_manageability(sc);
   2770 	wm_release_hw_control(sc);
   2771 	wm_enable_wakeup(sc);
   2772 	WM_CORE_UNLOCK(sc);
   2773 
   2774 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2775 
   2776 	/* Delete all remaining media. */
   2777 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2778 
   2779 	ether_ifdetach(ifp);
   2780 	if_detach(ifp);
   2781 	if_percpuq_destroy(sc->sc_ipq);
   2782 
   2783 	/* Unload RX dmamaps and free mbufs */
   2784 	for (i = 0; i < sc->sc_nqueues; i++) {
   2785 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2786 		mutex_enter(rxq->rxq_lock);
   2787 		wm_rxdrain(rxq);
   2788 		mutex_exit(rxq->rxq_lock);
   2789 	}
    2790 	/* The rxq locks must not be held beyond this point. */
   2791 
   2792 	/* Disestablish the interrupt handler */
   2793 	for (i = 0; i < sc->sc_nintrs; i++) {
   2794 		if (sc->sc_ihs[i] != NULL) {
   2795 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2796 			sc->sc_ihs[i] = NULL;
   2797 		}
   2798 	}
   2799 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2800 
   2801 	wm_free_txrx_queues(sc);
   2802 
   2803 	/* Unmap the registers */
   2804 	if (sc->sc_ss) {
   2805 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2806 		sc->sc_ss = 0;
   2807 	}
   2808 	if (sc->sc_ios) {
   2809 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2810 		sc->sc_ios = 0;
   2811 	}
   2812 	if (sc->sc_flashs) {
   2813 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2814 		sc->sc_flashs = 0;
   2815 	}
   2816 
   2817 	if (sc->sc_core_lock)
   2818 		mutex_obj_free(sc->sc_core_lock);
   2819 	if (sc->sc_ich_phymtx)
   2820 		mutex_obj_free(sc->sc_ich_phymtx);
   2821 	if (sc->sc_ich_nvmmtx)
   2822 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2823 
   2824 	return 0;
   2825 }
   2826 
   2827 static bool
   2828 wm_suspend(device_t self, const pmf_qual_t *qual)
   2829 {
   2830 	struct wm_softc *sc = device_private(self);
   2831 
   2832 	wm_release_manageability(sc);
   2833 	wm_release_hw_control(sc);
   2834 	wm_enable_wakeup(sc);
   2835 
   2836 	return true;
   2837 }
   2838 
   2839 static bool
   2840 wm_resume(device_t self, const pmf_qual_t *qual)
   2841 {
   2842 	struct wm_softc *sc = device_private(self);
   2843 
   2844 	wm_init_manageability(sc);
   2845 
   2846 	return true;
   2847 }
   2848 
   2849 /*
   2850  * wm_watchdog:		[ifnet interface function]
   2851  *
   2852  *	Watchdog timer handler.
   2853  */
   2854 static void
   2855 wm_watchdog(struct ifnet *ifp)
   2856 {
   2857 	int qid;
   2858 	struct wm_softc *sc = ifp->if_softc;
   2859 
   2860 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2861 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2862 
   2863 		wm_watchdog_txq(ifp, txq);
   2864 	}
   2865 
   2866 	/* Reset the interface. */
   2867 	(void) wm_init(ifp);
   2868 
   2869 	/*
    2870 	 * Some upper layer processing, e.g. ALTQ or a single-CPU system,
    2871 	 * still calls ifp->if_start() directly.
   2872 	 */
   2873 	/* Try to get more packets going. */
   2874 	ifp->if_start(ifp);
   2875 }
   2876 
   2877 static void
   2878 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2879 {
   2880 	struct wm_softc *sc = ifp->if_softc;
   2881 
   2882 	/*
   2883 	 * Since we're using delayed interrupts, sweep up
   2884 	 * before we report an error.
   2885 	 */
   2886 	mutex_enter(txq->txq_lock);
   2887 	wm_txeof(sc, txq);
   2888 	mutex_exit(txq->txq_lock);
   2889 
   2890 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2891 #ifdef WM_DEBUG
   2892 		int i, j;
   2893 		struct wm_txsoft *txs;
   2894 #endif
   2895 		log(LOG_ERR,
   2896 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2897 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2898 		    txq->txq_next);
   2899 		ifp->if_oerrors++;
   2900 #ifdef WM_DEBUG
    2901 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2902 		    i = WM_NEXTTXS(txq, i)) {
    2903 			txs = &txq->txq_soft[i];
    2904 			printf("txs %d tx %d -> %d\n",
    2905 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2906 			for (j = txs->txs_firstdesc; ;
    2907 			    j = WM_NEXTTX(txq, j)) {
    2908 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2909 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2910 				printf("\t %#08x%08x\n",
    2911 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2912 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2913 				if (j == txs->txs_lastdesc)
    2914 					break;
    2915 			}
    2916 		}
   2917 #endif
   2918 	}
   2919 }
   2920 
   2921 /*
   2922  * wm_tick:
   2923  *
   2924  *	One second timer, used to check link status, sweep up
   2925  *	completed transmit jobs, etc.
   2926  */
   2927 static void
   2928 wm_tick(void *arg)
   2929 {
   2930 	struct wm_softc *sc = arg;
   2931 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2932 #ifndef WM_MPSAFE
   2933 	int s = splnet();
   2934 #endif
   2935 
   2936 	WM_CORE_LOCK(sc);
   2937 
   2938 	if (sc->sc_core_stopping)
   2939 		goto out;
   2940 
   2941 	if (sc->sc_type >= WM_T_82542_2_1) {
   2942 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2943 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2944 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2945 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2946 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2947 	}
   2948 
   2949 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2950 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2951 	    + CSR_READ(sc, WMREG_CRCERRS)
   2952 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2953 	    + CSR_READ(sc, WMREG_SYMERRC)
   2954 	    + CSR_READ(sc, WMREG_RXERRC)
   2955 	    + CSR_READ(sc, WMREG_SEC)
   2956 	    + CSR_READ(sc, WMREG_CEXTERR)
   2957 	    + CSR_READ(sc, WMREG_RLEC);
   2958 	/*
    2959 	 * WMREG_RNBC is incremented when no receive buffers are available
    2960 	 * in host memory. It is not a count of dropped packets: the
    2961 	 * ethernet controller can still receive packets in that case as
    2962 	 * long as there is space in the PHY's FIFO.
    2963 	 *
    2964 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT of
    2965 	 * your own instead of if_iqdrops.
   2966 	 */
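         	/*
         	 * MPC (the missed packets count), by contrast, does count
         	 * frames dropped for lack of receive resources, so it is
         	 * what feeds if_iqdrops here.
         	 */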
   2967 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2968 
   2969 	if (sc->sc_flags & WM_F_HAS_MII)
   2970 		mii_tick(&sc->sc_mii);
   2971 	else if ((sc->sc_type >= WM_T_82575)
   2972 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2973 		wm_serdes_tick(sc);
   2974 	else
   2975 		wm_tbi_tick(sc);
   2976 
   2977 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2978 out:
   2979 	WM_CORE_UNLOCK(sc);
   2980 #ifndef WM_MPSAFE
   2981 	splx(s);
   2982 #endif
   2983 }
   2984 
   2985 static int
   2986 wm_ifflags_cb(struct ethercom *ec)
   2987 {
   2988 	struct ifnet *ifp = &ec->ec_if;
   2989 	struct wm_softc *sc = ifp->if_softc;
   2990 	int rc = 0;
   2991 
   2992 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2993 		device_xname(sc->sc_dev), __func__));
   2994 
   2995 	WM_CORE_LOCK(sc);
   2996 
   2997 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2998 	sc->sc_if_flags = ifp->if_flags;
   2999 
   3000 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3001 		rc = ENETRESET;
   3002 		goto out;
   3003 	}
   3004 
   3005 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3006 		wm_set_filter(sc);
   3007 
   3008 	wm_set_vlan(sc);
   3009 
   3010 out:
   3011 	WM_CORE_UNLOCK(sc);
   3012 
   3013 	return rc;
   3014 }
   3015 
   3016 /*
   3017  * wm_ioctl:		[ifnet interface function]
   3018  *
   3019  *	Handle control requests from the operator.
   3020  */
   3021 static int
   3022 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3023 {
   3024 	struct wm_softc *sc = ifp->if_softc;
   3025 	struct ifreq *ifr = (struct ifreq *) data;
   3026 	struct ifaddr *ifa = (struct ifaddr *)data;
   3027 	struct sockaddr_dl *sdl;
   3028 	int s, error;
   3029 
   3030 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3031 		device_xname(sc->sc_dev), __func__));
   3032 
   3033 #ifndef WM_MPSAFE
   3034 	s = splnet();
   3035 #endif
   3036 	switch (cmd) {
   3037 	case SIOCSIFMEDIA:
   3038 	case SIOCGIFMEDIA:
   3039 		WM_CORE_LOCK(sc);
   3040 		/* Flow control requires full-duplex mode. */
   3041 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3042 		    (ifr->ifr_media & IFM_FDX) == 0)
   3043 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3044 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3045 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3046 				/* We can do both TXPAUSE and RXPAUSE. */
   3047 				ifr->ifr_media |=
   3048 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3049 			}
   3050 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3051 		}
   3052 		WM_CORE_UNLOCK(sc);
   3053 #ifdef WM_MPSAFE
   3054 		s = splnet();
   3055 #endif
   3056 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3057 #ifdef WM_MPSAFE
   3058 		splx(s);
   3059 #endif
   3060 		break;
   3061 	case SIOCINITIFADDR:
   3062 		WM_CORE_LOCK(sc);
   3063 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3064 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3065 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3066 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3067 			/* unicast address is first multicast entry */
   3068 			wm_set_filter(sc);
   3069 			error = 0;
   3070 			WM_CORE_UNLOCK(sc);
   3071 			break;
   3072 		}
   3073 		WM_CORE_UNLOCK(sc);
   3074 		/*FALLTHROUGH*/
   3075 	default:
   3076 #ifdef WM_MPSAFE
   3077 		s = splnet();
   3078 #endif
   3079 		/* It may call wm_start, so unlock here */
   3080 		error = ether_ioctl(ifp, cmd, data);
   3081 #ifdef WM_MPSAFE
   3082 		splx(s);
   3083 #endif
   3084 		if (error != ENETRESET)
   3085 			break;
   3086 
   3087 		error = 0;
   3088 
   3089 		if (cmd == SIOCSIFCAP) {
   3090 			error = (*ifp->if_init)(ifp);
   3091 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3092 			;
   3093 		else if (ifp->if_flags & IFF_RUNNING) {
   3094 			/*
   3095 			 * Multicast list has changed; set the hardware filter
   3096 			 * accordingly.
   3097 			 */
   3098 			WM_CORE_LOCK(sc);
   3099 			wm_set_filter(sc);
   3100 			WM_CORE_UNLOCK(sc);
   3101 		}
   3102 		break;
   3103 	}
   3104 
   3105 #ifndef WM_MPSAFE
   3106 	splx(s);
   3107 #endif
   3108 	return error;
   3109 }
   3110 
   3111 /* MAC address related */
   3112 
   3113 /*
    3114  * Get the offset of the MAC address and return it.
    3115  * If an error occurs, use offset 0.
   3116  */
   3117 static uint16_t
   3118 wm_check_alt_mac_addr(struct wm_softc *sc)
   3119 {
   3120 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3121 	uint16_t offset = NVM_OFF_MACADDR;
   3122 
   3123 	/* Try to read alternative MAC address pointer */
   3124 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3125 		return 0;
   3126 
    3127 	/* Check whether the pointer is valid. */
   3128 	if ((offset == 0x0000) || (offset == 0xffff))
   3129 		return 0;
   3130 
   3131 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3132 	/*
    3133 	 * Check whether the alternative MAC address is valid. Some cards
    3134 	 * have a non-0xffff pointer but don't actually use an alternative
    3135 	 * MAC address.
    3136 	 *
    3137 	 * Validate it by checking that the broadcast/multicast bit is clear.
   3138 	 */
   3139 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3140 		if (((myea[0] & 0xff) & 0x01) == 0)
   3141 			return offset; /* Found */
   3142 
   3143 	/* Not found */
   3144 	return 0;
   3145 }
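         /*
          * A sketch of the NVM layout assumed above: the word at
          * NVM_OFF_ALT_MAC_ADDR_PTR points to a block of per-function MAC
          * addresses, NVM_OFF_MACADDR_82571(funcid) indexes this function's
          * entry within that block, and bit 0 of the first byte (the
          * multicast bit) must be clear for a plausible unicast address.
          */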
   3146 
   3147 static int
   3148 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3149 {
   3150 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3151 	uint16_t offset = NVM_OFF_MACADDR;
   3152 	int do_invert = 0;
   3153 
   3154 	switch (sc->sc_type) {
   3155 	case WM_T_82580:
   3156 	case WM_T_I350:
   3157 	case WM_T_I354:
   3158 		/* EEPROM Top Level Partitioning */
   3159 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3160 		break;
   3161 	case WM_T_82571:
   3162 	case WM_T_82575:
   3163 	case WM_T_82576:
   3164 	case WM_T_80003:
   3165 	case WM_T_I210:
   3166 	case WM_T_I211:
   3167 		offset = wm_check_alt_mac_addr(sc);
   3168 		if (offset == 0)
   3169 			if ((sc->sc_funcid & 0x01) == 1)
   3170 				do_invert = 1;
   3171 		break;
   3172 	default:
   3173 		if ((sc->sc_funcid & 0x01) == 1)
   3174 			do_invert = 1;
   3175 		break;
   3176 	}
   3177 
   3178 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3179 		goto bad;
   3180 
   3181 	enaddr[0] = myea[0] & 0xff;
   3182 	enaddr[1] = myea[0] >> 8;
   3183 	enaddr[2] = myea[1] & 0xff;
   3184 	enaddr[3] = myea[1] >> 8;
   3185 	enaddr[4] = myea[2] & 0xff;
   3186 	enaddr[5] = myea[2] >> 8;
   3187 
   3188 	/*
   3189 	 * Toggle the LSB of the MAC address on the second port
   3190 	 * of some dual port cards.
   3191 	 */
   3192 	if (do_invert != 0)
   3193 		enaddr[5] ^= 1;
   3194 
   3195 	return 0;
   3196 
   3197  bad:
   3198 	return -1;
   3199 }
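         /*
          * Example of the unpacking above: NVM words are little-endian, so
          * myea[0] == 0x2301 yields enaddr[0] == 0x01 and enaddr[1] == 0x23.
          */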
   3200 
   3201 /*
   3202  * wm_set_ral:
   3203  *
    3204  *	Set an entry in the receive address list.
   3205  */
   3206 static void
   3207 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3208 {
   3209 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3210 	uint32_t wlock_mac;
   3211 	int rv;
   3212 
   3213 	if (enaddr != NULL) {
   3214 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3215 		    (enaddr[3] << 24);
   3216 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3217 		ral_hi |= RAL_AV;
   3218 	} else {
   3219 		ral_lo = 0;
   3220 		ral_hi = 0;
   3221 	}
   3222 
   3223 	switch (sc->sc_type) {
   3224 	case WM_T_82542_2_0:
   3225 	case WM_T_82542_2_1:
   3226 	case WM_T_82543:
   3227 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3228 		CSR_WRITE_FLUSH(sc);
   3229 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3230 		CSR_WRITE_FLUSH(sc);
   3231 		break;
   3232 	case WM_T_PCH2:
   3233 	case WM_T_PCH_LPT:
   3234 	case WM_T_PCH_SPT:
   3235 		if (idx == 0) {
   3236 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3237 			CSR_WRITE_FLUSH(sc);
   3238 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3239 			CSR_WRITE_FLUSH(sc);
   3240 			return;
   3241 		}
   3242 		if (sc->sc_type != WM_T_PCH2) {
   3243 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3244 			    FWSM_WLOCK_MAC);
   3245 			addrl = WMREG_SHRAL(idx - 1);
   3246 			addrh = WMREG_SHRAH(idx - 1);
   3247 		} else {
   3248 			wlock_mac = 0;
   3249 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3250 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3251 		}
   3252 
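         		/*
         		 * FWSM_WLOCK_MAC reports how many shared receive address
         		 * (SHRA) registers the firmware allows the host to modify;
         		 * zero is taken to mean no lock, i.e. all entries writable.
         		 */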
   3253 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3254 			rv = wm_get_swflag_ich8lan(sc);
   3255 			if (rv != 0)
   3256 				return;
   3257 			CSR_WRITE(sc, addrl, ral_lo);
   3258 			CSR_WRITE_FLUSH(sc);
   3259 			CSR_WRITE(sc, addrh, ral_hi);
   3260 			CSR_WRITE_FLUSH(sc);
   3261 			wm_put_swflag_ich8lan(sc);
   3262 		}
   3263 
   3264 		break;
   3265 	default:
   3266 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3267 		CSR_WRITE_FLUSH(sc);
   3268 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3269 		CSR_WRITE_FLUSH(sc);
   3270 		break;
   3271 	}
   3272 }
   3273 
   3274 /*
   3275  * wm_mchash:
   3276  *
   3277  *	Compute the hash of the multicast address for the 4096-bit
   3278  *	multicast filter.
   3279  */
   3280 static uint32_t
   3281 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3282 {
   3283 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3284 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3285 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3286 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3287 	uint32_t hash;
   3288 
   3289 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3290 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3291 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3292 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3293 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3294 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3295 		return (hash & 0x3ff);
   3296 	}
   3297 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3298 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3299 
   3300 	return (hash & 0xfff);
   3301 }
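         /*
          * wm_set_filter() below consumes the hash as follows: the upper bits
          * (hash >> 5) select a 32-bit word of the multicast table and the
          * low five bits select the bit within that word.
          */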
   3302 
   3303 /*
   3304  * wm_set_filter:
   3305  *
   3306  *	Set up the receive filter.
   3307  */
   3308 static void
   3309 wm_set_filter(struct wm_softc *sc)
   3310 {
   3311 	struct ethercom *ec = &sc->sc_ethercom;
   3312 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3313 	struct ether_multi *enm;
   3314 	struct ether_multistep step;
   3315 	bus_addr_t mta_reg;
   3316 	uint32_t hash, reg, bit;
   3317 	int i, size, ralmax;
   3318 
   3319 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3320 		device_xname(sc->sc_dev), __func__));
   3321 
   3322 	if (sc->sc_type >= WM_T_82544)
   3323 		mta_reg = WMREG_CORDOVA_MTA;
   3324 	else
   3325 		mta_reg = WMREG_MTA;
   3326 
   3327 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3328 
   3329 	if (ifp->if_flags & IFF_BROADCAST)
   3330 		sc->sc_rctl |= RCTL_BAM;
   3331 	if (ifp->if_flags & IFF_PROMISC) {
   3332 		sc->sc_rctl |= RCTL_UPE;
   3333 		goto allmulti;
   3334 	}
   3335 
   3336 	/*
   3337 	 * Set the station address in the first RAL slot, and
   3338 	 * clear the remaining slots.
   3339 	 */
   3340 	if (sc->sc_type == WM_T_ICH8)
    3341 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3342 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3343 	    || (sc->sc_type == WM_T_PCH))
   3344 		size = WM_RAL_TABSIZE_ICH8;
   3345 	else if (sc->sc_type == WM_T_PCH2)
   3346 		size = WM_RAL_TABSIZE_PCH2;
   3347 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3348 		size = WM_RAL_TABSIZE_PCH_LPT;
   3349 	else if (sc->sc_type == WM_T_82575)
   3350 		size = WM_RAL_TABSIZE_82575;
   3351 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3352 		size = WM_RAL_TABSIZE_82576;
   3353 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3354 		size = WM_RAL_TABSIZE_I350;
   3355 	else
   3356 		size = WM_RAL_TABSIZE;
   3357 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3358 
   3359 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3360 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3361 		switch (i) {
   3362 		case 0:
   3363 			/* We can use all entries */
   3364 			ralmax = size;
   3365 			break;
   3366 		case 1:
   3367 			/* Only RAR[0] */
   3368 			ralmax = 1;
   3369 			break;
   3370 		default:
   3371 			/* available SHRA + RAR[0] */
   3372 			ralmax = i + 1;
   3373 		}
   3374 	} else
   3375 		ralmax = size;
   3376 	for (i = 1; i < size; i++) {
   3377 		if (i < ralmax)
   3378 			wm_set_ral(sc, NULL, i);
   3379 	}
   3380 
   3381 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3382 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3383 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3384 	    || (sc->sc_type == WM_T_PCH_SPT))
   3385 		size = WM_ICH8_MC_TABSIZE;
   3386 	else
   3387 		size = WM_MC_TABSIZE;
   3388 	/* Clear out the multicast table. */
   3389 	for (i = 0; i < size; i++) {
   3390 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3391 		CSR_WRITE_FLUSH(sc);
   3392 	}
   3393 
   3394 	ETHER_LOCK(ec);
   3395 	ETHER_FIRST_MULTI(step, ec, enm);
   3396 	while (enm != NULL) {
   3397 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3398 			ETHER_UNLOCK(ec);
   3399 			/*
   3400 			 * We must listen to a range of multicast addresses.
   3401 			 * For now, just accept all multicasts, rather than
   3402 			 * trying to set only those filter bits needed to match
   3403 			 * the range.  (At this time, the only use of address
   3404 			 * ranges is for IP multicast routing, for which the
   3405 			 * range is big enough to require all bits set.)
   3406 			 */
   3407 			goto allmulti;
   3408 		}
   3409 
   3410 		hash = wm_mchash(sc, enm->enm_addrlo);
   3411 
   3412 		reg = (hash >> 5);
   3413 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3414 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3415 		    || (sc->sc_type == WM_T_PCH2)
   3416 		    || (sc->sc_type == WM_T_PCH_LPT)
   3417 		    || (sc->sc_type == WM_T_PCH_SPT))
   3418 			reg &= 0x1f;
   3419 		else
   3420 			reg &= 0x7f;
   3421 		bit = hash & 0x1f;
   3422 
   3423 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3424 		hash |= 1U << bit;
   3425 
   3426 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3427 			/*
    3428 			 * 82544 Errata 9: Certain registers cannot be written
   3429 			 * with particular alignments in PCI-X bus operation
   3430 			 * (FCAH, MTA and VFTA).
   3431 			 */
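         			/*
         			 * The workaround below: after writing an
         			 * odd-numbered MTA word, rewrite the preceding
         			 * even-numbered word with its original value.
         			 */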
   3432 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3433 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3434 			CSR_WRITE_FLUSH(sc);
   3435 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3436 			CSR_WRITE_FLUSH(sc);
   3437 		} else {
   3438 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3439 			CSR_WRITE_FLUSH(sc);
   3440 		}
   3441 
   3442 		ETHER_NEXT_MULTI(step, enm);
   3443 	}
   3444 	ETHER_UNLOCK(ec);
   3445 
   3446 	ifp->if_flags &= ~IFF_ALLMULTI;
   3447 	goto setit;
   3448 
   3449  allmulti:
   3450 	ifp->if_flags |= IFF_ALLMULTI;
   3451 	sc->sc_rctl |= RCTL_MPE;
   3452 
   3453  setit:
   3454 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3455 }
   3456 
   3457 /* Reset and init related */
   3458 
   3459 static void
   3460 wm_set_vlan(struct wm_softc *sc)
   3461 {
   3462 
   3463 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3464 		device_xname(sc->sc_dev), __func__));
   3465 
   3466 	/* Deal with VLAN enables. */
   3467 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3468 		sc->sc_ctrl |= CTRL_VME;
   3469 	else
   3470 		sc->sc_ctrl &= ~CTRL_VME;
   3471 
   3472 	/* Write the control registers. */
   3473 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3474 }
   3475 
   3476 static void
   3477 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3478 {
   3479 	uint32_t gcr;
   3480 	pcireg_t ctrl2;
   3481 
   3482 	gcr = CSR_READ(sc, WMREG_GCR);
   3483 
   3484 	/* Only take action if timeout value is defaulted to 0 */
   3485 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3486 		goto out;
   3487 
   3488 	if ((gcr & GCR_CAP_VER2) == 0) {
   3489 		gcr |= GCR_CMPL_TMOUT_10MS;
   3490 		goto out;
   3491 	}
   3492 
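         	/*
         	 * Capability-version-2 parts expose the completion timeout
         	 * through the standard PCIe Device Control 2 register instead
         	 * of GCR.
         	 */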
   3493 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3494 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3495 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3496 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3497 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3498 
   3499 out:
   3500 	/* Disable completion timeout resend */
   3501 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3502 
   3503 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3504 }
   3505 
   3506 void
   3507 wm_get_auto_rd_done(struct wm_softc *sc)
   3508 {
   3509 	int i;
   3510 
   3511 	/* wait for eeprom to reload */
   3512 	switch (sc->sc_type) {
   3513 	case WM_T_82571:
   3514 	case WM_T_82572:
   3515 	case WM_T_82573:
   3516 	case WM_T_82574:
   3517 	case WM_T_82583:
   3518 	case WM_T_82575:
   3519 	case WM_T_82576:
   3520 	case WM_T_82580:
   3521 	case WM_T_I350:
   3522 	case WM_T_I354:
   3523 	case WM_T_I210:
   3524 	case WM_T_I211:
   3525 	case WM_T_80003:
   3526 	case WM_T_ICH8:
   3527 	case WM_T_ICH9:
   3528 		for (i = 0; i < 10; i++) {
   3529 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3530 				break;
   3531 			delay(1000);
   3532 		}
   3533 		if (i == 10) {
   3534 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3535 			    "complete\n", device_xname(sc->sc_dev));
   3536 		}
   3537 		break;
   3538 	default:
   3539 		break;
   3540 	}
   3541 }
   3542 
   3543 void
   3544 wm_lan_init_done(struct wm_softc *sc)
   3545 {
   3546 	uint32_t reg = 0;
   3547 	int i;
   3548 
   3549 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3550 		device_xname(sc->sc_dev), __func__));
   3551 
   3552 	/* Wait for eeprom to reload */
   3553 	switch (sc->sc_type) {
   3554 	case WM_T_ICH10:
   3555 	case WM_T_PCH:
   3556 	case WM_T_PCH2:
   3557 	case WM_T_PCH_LPT:
   3558 	case WM_T_PCH_SPT:
   3559 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3560 			reg = CSR_READ(sc, WMREG_STATUS);
   3561 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3562 				break;
   3563 			delay(100);
   3564 		}
   3565 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3566 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3567 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3568 		}
   3569 		break;
   3570 	default:
   3571 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3572 		    __func__);
   3573 		break;
   3574 	}
   3575 
   3576 	reg &= ~STATUS_LAN_INIT_DONE;
   3577 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3578 }
   3579 
   3580 void
   3581 wm_get_cfg_done(struct wm_softc *sc)
   3582 {
   3583 	int mask;
   3584 	uint32_t reg;
   3585 	int i;
   3586 
   3587 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3588 		device_xname(sc->sc_dev), __func__));
   3589 
   3590 	/* Wait for eeprom to reload */
   3591 	switch (sc->sc_type) {
   3592 	case WM_T_82542_2_0:
   3593 	case WM_T_82542_2_1:
   3594 		/* null */
   3595 		break;
   3596 	case WM_T_82543:
   3597 	case WM_T_82544:
   3598 	case WM_T_82540:
   3599 	case WM_T_82545:
   3600 	case WM_T_82545_3:
   3601 	case WM_T_82546:
   3602 	case WM_T_82546_3:
   3603 	case WM_T_82541:
   3604 	case WM_T_82541_2:
   3605 	case WM_T_82547:
   3606 	case WM_T_82547_2:
   3607 	case WM_T_82573:
   3608 	case WM_T_82574:
   3609 	case WM_T_82583:
   3610 		/* generic */
   3611 		delay(10*1000);
   3612 		break;
   3613 	case WM_T_80003:
   3614 	case WM_T_82571:
   3615 	case WM_T_82572:
   3616 	case WM_T_82575:
   3617 	case WM_T_82576:
   3618 	case WM_T_82580:
   3619 	case WM_T_I350:
   3620 	case WM_T_I354:
   3621 	case WM_T_I210:
   3622 	case WM_T_I211:
   3623 		if (sc->sc_type == WM_T_82571) {
   3624 			/* Only 82571 shares port 0 */
   3625 			mask = EEMNGCTL_CFGDONE_0;
   3626 		} else
   3627 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3628 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3629 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3630 				break;
   3631 			delay(1000);
   3632 		}
   3633 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3634 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3635 				device_xname(sc->sc_dev), __func__));
   3636 		}
   3637 		break;
   3638 	case WM_T_ICH8:
   3639 	case WM_T_ICH9:
   3640 	case WM_T_ICH10:
   3641 	case WM_T_PCH:
   3642 	case WM_T_PCH2:
   3643 	case WM_T_PCH_LPT:
   3644 	case WM_T_PCH_SPT:
   3645 		delay(10*1000);
   3646 		if (sc->sc_type >= WM_T_ICH10)
   3647 			wm_lan_init_done(sc);
   3648 		else
   3649 			wm_get_auto_rd_done(sc);
   3650 
   3651 		reg = CSR_READ(sc, WMREG_STATUS);
   3652 		if ((reg & STATUS_PHYRA) != 0)
   3653 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3654 		break;
   3655 	default:
   3656 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3657 		    __func__);
   3658 		break;
   3659 	}
   3660 }
   3661 
   3662 void
   3663 wm_phy_post_reset(struct wm_softc *sc)
   3664 {
   3665 	uint32_t reg;
   3666 
   3667 	/* This function is only for ICH8 and newer. */
   3668 	if (sc->sc_type < WM_T_ICH8)
   3669 		return;
   3670 
   3671 	if (wm_phy_resetisblocked(sc)) {
   3672 		/* XXX */
   3673 		device_printf(sc->sc_dev, " PHY is blocked\n");
   3674 		return;
   3675 	}
   3676 
   3677 	/* Allow time for h/w to get to quiescent state after reset */
   3678 	delay(10*1000);
   3679 
   3680 	/* Perform any necessary post-reset workarounds */
   3681 	if (sc->sc_type == WM_T_PCH)
   3682 		wm_hv_phy_workaround_ich8lan(sc);
   3683 	if (sc->sc_type == WM_T_PCH2)
   3684 		wm_lv_phy_workaround_ich8lan(sc);
   3685 
   3686 	/* Clear the host wakeup bit after lcd reset */
   3687 	if (sc->sc_type >= WM_T_PCH) {
   3688 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3689 		    BM_PORT_GEN_CFG);
   3690 		reg &= ~BM_WUC_HOST_WU_BIT;
   3691 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3692 		    BM_PORT_GEN_CFG, reg);
   3693 	}
   3694 
   3695 	/*
    3696 	 * XXX Configure the LCD with the extended configuration region
   3697 	 * in NVM
   3698 	 */
   3699 
   3700 	/* Configure the LCD with the OEM bits in NVM */
   3701 }
   3702 
   3703 /* Init hardware bits */
   3704 void
   3705 wm_initialize_hardware_bits(struct wm_softc *sc)
   3706 {
   3707 	uint32_t tarc0, tarc1, reg;
   3708 
   3709 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3710 		device_xname(sc->sc_dev), __func__));
   3711 
   3712 	/* For 82571 variant, 80003 and ICHs */
   3713 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3714 	    || (sc->sc_type >= WM_T_80003)) {
   3715 
   3716 		/* Transmit Descriptor Control 0 */
   3717 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3718 		reg |= TXDCTL_COUNT_DESC;
   3719 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3720 
   3721 		/* Transmit Descriptor Control 1 */
   3722 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3723 		reg |= TXDCTL_COUNT_DESC;
   3724 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3725 
   3726 		/* TARC0 */
   3727 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3728 		switch (sc->sc_type) {
   3729 		case WM_T_82571:
   3730 		case WM_T_82572:
   3731 		case WM_T_82573:
   3732 		case WM_T_82574:
   3733 		case WM_T_82583:
   3734 		case WM_T_80003:
   3735 			/* Clear bits 30..27 */
   3736 			tarc0 &= ~__BITS(30, 27);
   3737 			break;
   3738 		default:
   3739 			break;
   3740 		}
   3741 
   3742 		switch (sc->sc_type) {
   3743 		case WM_T_82571:
   3744 		case WM_T_82572:
   3745 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3746 
   3747 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3748 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3749 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3750 			/* 8257[12] Errata No.7 */
    3751 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3752 
   3753 			/* TARC1 bit 28 */
   3754 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3755 				tarc1 &= ~__BIT(28);
   3756 			else
   3757 				tarc1 |= __BIT(28);
   3758 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3759 
   3760 			/*
   3761 			 * 8257[12] Errata No.13
    3762 			 * Disable Dynamic Clock Gating.
   3763 			 */
   3764 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3765 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3766 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3767 			break;
   3768 		case WM_T_82573:
   3769 		case WM_T_82574:
   3770 		case WM_T_82583:
   3771 			if ((sc->sc_type == WM_T_82574)
   3772 			    || (sc->sc_type == WM_T_82583))
   3773 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3774 
   3775 			/* Extended Device Control */
   3776 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3777 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3778 			reg |= __BIT(22);	/* Set bit 22 */
   3779 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3780 
   3781 			/* Device Control */
   3782 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3783 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3784 
   3785 			/* PCIe Control Register */
   3786 			/*
   3787 			 * 82573 Errata (unknown).
   3788 			 *
   3789 			 * 82574 Errata 25 and 82583 Errata 12
   3790 			 * "Dropped Rx Packets":
    3791 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3792 			 */
   3793 			reg = CSR_READ(sc, WMREG_GCR);
   3794 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3795 			CSR_WRITE(sc, WMREG_GCR, reg);
   3796 
   3797 			if ((sc->sc_type == WM_T_82574)
   3798 			    || (sc->sc_type == WM_T_82583)) {
   3799 				/*
   3800 				 * Document says this bit must be set for
   3801 				 * proper operation.
   3802 				 */
   3803 				reg = CSR_READ(sc, WMREG_GCR);
   3804 				reg |= __BIT(22);
   3805 				CSR_WRITE(sc, WMREG_GCR, reg);
   3806 
   3807 				/*
    3808 				 * Apply a workaround for the hardware errata
    3809 				 * documented in the errata docs. It fixes an
    3810 				 * issue where some error-prone or unreliable
    3811 				 * PCIe completions occur, particularly with
    3812 				 * ASPM enabled. Without the fix, the issue
    3813 				 * can cause Tx timeouts.
   3814 				 */
   3815 				reg = CSR_READ(sc, WMREG_GCR2);
   3816 				reg |= __BIT(0);
   3817 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3818 			}
   3819 			break;
   3820 		case WM_T_80003:
   3821 			/* TARC0 */
   3822 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3823 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3824 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3825 
   3826 			/* TARC1 bit 28 */
   3827 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3828 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3829 				tarc1 &= ~__BIT(28);
   3830 			else
   3831 				tarc1 |= __BIT(28);
   3832 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3833 			break;
   3834 		case WM_T_ICH8:
   3835 		case WM_T_ICH9:
   3836 		case WM_T_ICH10:
   3837 		case WM_T_PCH:
   3838 		case WM_T_PCH2:
   3839 		case WM_T_PCH_LPT:
   3840 		case WM_T_PCH_SPT:
   3841 			/* TARC0 */
   3842 			if ((sc->sc_type == WM_T_ICH8)
   3843 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3844 				/* Set TARC0 bits 29 and 28 */
   3845 				tarc0 |= __BITS(29, 28);
   3846 			}
   3847 			/* Set TARC0 bits 23,24,26,27 */
   3848 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3849 
   3850 			/* CTRL_EXT */
   3851 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3852 			reg |= __BIT(22);	/* Set bit 22 */
   3853 			/*
   3854 			 * Enable PHY low-power state when MAC is at D3
   3855 			 * w/o WoL
   3856 			 */
   3857 			if (sc->sc_type >= WM_T_PCH)
   3858 				reg |= CTRL_EXT_PHYPDEN;
   3859 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3860 
   3861 			/* TARC1 */
   3862 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3863 			/* bit 28 */
   3864 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3865 				tarc1 &= ~__BIT(28);
   3866 			else
   3867 				tarc1 |= __BIT(28);
   3868 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3869 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3870 
   3871 			/* Device Status */
   3872 			if (sc->sc_type == WM_T_ICH8) {
   3873 				reg = CSR_READ(sc, WMREG_STATUS);
   3874 				reg &= ~__BIT(31);
   3875 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3876 
   3877 			}
   3878 
   3879 			/* IOSFPC */
   3880 			if (sc->sc_type == WM_T_PCH_SPT) {
   3881 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3882 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3883 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3884 			}
   3885 			/*
   3886 			 * Work-around descriptor data corruption issue during
   3887 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3888 			 * capability.
   3889 			 */
   3890 			reg = CSR_READ(sc, WMREG_RFCTL);
   3891 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3892 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3893 			break;
   3894 		default:
   3895 			break;
   3896 		}
   3897 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3898 
   3899 		switch (sc->sc_type) {
   3900 		/*
   3901 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3902 		 * Avoid RSS Hash Value bug.
   3903 		 */
   3904 		case WM_T_82571:
   3905 		case WM_T_82572:
   3906 		case WM_T_82573:
   3907 		case WM_T_80003:
   3908 		case WM_T_ICH8:
   3909 			reg = CSR_READ(sc, WMREG_RFCTL);
   3910 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3911 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3912 			break;
   3913 		case WM_T_82574:
    3914 			/* Use extended Rx descriptors. */
   3915 			reg = CSR_READ(sc, WMREG_RFCTL);
   3916 			reg |= WMREG_RFCTL_EXSTEN;
   3917 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3918 			break;
   3919 		default:
   3920 			break;
   3921 		}
   3922 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3923 		/*
   3924 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3925 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3926 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3927 		 * Correctly by the Device"
   3928 		 *
   3929 		 * I354(C2000) Errata AVR53:
   3930 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3931 		 * Hang"
   3932 		 */
   3933 		reg = CSR_READ(sc, WMREG_RFCTL);
   3934 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3935 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3936 	}
   3937 }
   3938 
   3939 static uint32_t
   3940 wm_rxpbs_adjust_82580(uint32_t val)
   3941 {
   3942 	uint32_t rv = 0;
   3943 
   3944 	if (val < __arraycount(wm_82580_rxpbs_table))
   3945 		rv = wm_82580_rxpbs_table[val];
   3946 
   3947 	return rv;
   3948 }
   3949 
   3950 /*
   3951  * wm_reset_phy:
   3952  *
   3953  *	generic PHY reset function.
   3954  *	Same as e1000_phy_hw_reset_generic()
   3955  */
   3956 static void
   3957 wm_reset_phy(struct wm_softc *sc)
   3958 {
   3959 	uint32_t reg;
   3960 
   3961 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3962 		device_xname(sc->sc_dev), __func__));
   3963 	if (wm_phy_resetisblocked(sc))
   3964 		return;
   3965 
   3966 	sc->phy.acquire(sc);
   3967 
   3968 	reg = CSR_READ(sc, WMREG_CTRL);
   3969 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3970 	CSR_WRITE_FLUSH(sc);
   3971 
   3972 	delay(sc->phy.reset_delay_us);
   3973 
   3974 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3975 	CSR_WRITE_FLUSH(sc);
   3976 
   3977 	delay(150);
   3978 
   3979 	sc->phy.release(sc);
   3980 
   3981 	wm_get_cfg_done(sc);
   3982 	wm_phy_post_reset(sc);
   3983 }
   3984 
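         /*
          * wm_flush_desc_rings:
          *
          *	Flush stale data out of the TX and RX descriptor rings before
          *	a reset.  This appears to be required only on PCH_SPT
          *	(I219-class) devices, where resetting with a pending
          *	descriptor flush request can hang the hardware.
          */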
   3985 static void
   3986 wm_flush_desc_rings(struct wm_softc *sc)
   3987 {
   3988 	pcireg_t preg;
   3989 	uint32_t reg;
   3990 	int nexttx;
   3991 
   3992 	/* First, disable MULR fix in FEXTNVM11 */
   3993 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3994 	reg |= FEXTNVM11_DIS_MULRFIX;
   3995 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3996 
   3997 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3998 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3999 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   4000 		struct wm_txqueue *txq;
   4001 		wiseman_txdesc_t *txd;
   4002 
   4003 		/* TX */
   4004 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4005 		    device_xname(sc->sc_dev), preg, reg);
   4006 		reg = CSR_READ(sc, WMREG_TCTL);
   4007 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4008 
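         		/*
         		 * Queue one dummy 512-byte descriptor with IFCS set and
         		 * bump the tail pointer so the hardware fetches it; this
         		 * appears to satisfy the pending flush request.
         		 */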
   4009 		txq = &sc->sc_queue[0].wmq_txq;
   4010 		nexttx = txq->txq_next;
   4011 		txd = &txq->txq_descs[nexttx];
   4012 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4013 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4014 		txd->wtx_fields.wtxu_status = 0;
   4015 		txd->wtx_fields.wtxu_options = 0;
   4016 		txd->wtx_fields.wtxu_vlan = 0;
   4017 
   4018 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4019 			BUS_SPACE_BARRIER_WRITE);
   4020 
   4021 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4022 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4023 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4024 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4025 		delay(250);
   4026 	}
   4027 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4028 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   4029 		uint32_t rctl;
   4030 
   4031 		/* RX */
   4032 		printf("%s: Need RX flush (reg = %08x)\n",
   4033 		    device_xname(sc->sc_dev), preg);
   4034 		rctl = CSR_READ(sc, WMREG_RCTL);
   4035 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4036 		CSR_WRITE_FLUSH(sc);
   4037 		delay(150);
   4038 
   4039 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4040 		/* zero the lower 14 bits (prefetch and host thresholds) */
   4041 		reg &= 0xffffc000;
   4042 		/*
   4043 		 * update thresholds: prefetch threshold to 31, host threshold
   4044 		 * to 1 and make sure the granularity is "descriptors" and not
   4045 		 * "cache lines"
   4046 		 */
   4047 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4048 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4049 
   4050 		/*
   4051 		 * momentarily enable the RX ring for the changes to take
   4052 		 * effect
   4053 		 */
   4054 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4055 		CSR_WRITE_FLUSH(sc);
   4056 		delay(150);
   4057 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4058 	}
   4059 }
   4060 
   4061 /*
   4062  * wm_reset:
   4063  *
   4064  *	Reset the i82542 chip.
   4065  */
   4066 static void
   4067 wm_reset(struct wm_softc *sc)
   4068 {
   4069 	int phy_reset = 0;
   4070 	int i, error = 0;
   4071 	uint32_t reg;
   4072 
   4073 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4074 		device_xname(sc->sc_dev), __func__));
   4075 	KASSERT(sc->sc_type != 0);
   4076 
   4077 	/*
   4078 	 * Allocate on-chip memory according to the MTU size.
   4079 	 * The Packet Buffer Allocation register must be written
   4080 	 * before the chip is reset.
   4081 	 */
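         	/*
         	 * For example, on the 82547 the 40 KB packet buffer is split:
         	 * the low sc_pba KB form the RX buffer and the remainder backs
         	 * the TX FIFO that the driver tracks by hand (the txq_fifo_*
         	 * fields below).
         	 */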
   4082 	switch (sc->sc_type) {
   4083 	case WM_T_82547:
   4084 	case WM_T_82547_2:
   4085 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4086 		    PBA_22K : PBA_30K;
   4087 		for (i = 0; i < sc->sc_nqueues; i++) {
   4088 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4089 			txq->txq_fifo_head = 0;
   4090 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4091 			txq->txq_fifo_size =
   4092 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4093 			txq->txq_fifo_stall = 0;
   4094 		}
   4095 		break;
   4096 	case WM_T_82571:
   4097 	case WM_T_82572:
    4098 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4099 	case WM_T_80003:
   4100 		sc->sc_pba = PBA_32K;
   4101 		break;
   4102 	case WM_T_82573:
   4103 		sc->sc_pba = PBA_12K;
   4104 		break;
   4105 	case WM_T_82574:
   4106 	case WM_T_82583:
   4107 		sc->sc_pba = PBA_20K;
   4108 		break;
   4109 	case WM_T_82576:
   4110 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4111 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4112 		break;
   4113 	case WM_T_82580:
   4114 	case WM_T_I350:
   4115 	case WM_T_I354:
   4116 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4117 		break;
   4118 	case WM_T_I210:
   4119 	case WM_T_I211:
   4120 		sc->sc_pba = PBA_34K;
   4121 		break;
   4122 	case WM_T_ICH8:
   4123 		/* Workaround for a bit corruption issue in FIFO memory */
   4124 		sc->sc_pba = PBA_8K;
   4125 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4126 		break;
   4127 	case WM_T_ICH9:
   4128 	case WM_T_ICH10:
   4129 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4130 		    PBA_14K : PBA_10K;
   4131 		break;
   4132 	case WM_T_PCH:
   4133 	case WM_T_PCH2:
   4134 	case WM_T_PCH_LPT:
   4135 	case WM_T_PCH_SPT:
   4136 		sc->sc_pba = PBA_26K;
   4137 		break;
   4138 	default:
   4139 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4140 		    PBA_40K : PBA_48K;
   4141 		break;
   4142 	}
   4143 	/*
   4144 	 * Only old or non-multiqueue devices have the PBA register
   4145 	 * XXX Need special handling for 82575.
   4146 	 */
   4147 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4148 	    || (sc->sc_type == WM_T_82575))
   4149 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4150 
   4151 	/* Prevent the PCI-E bus from sticking */
   4152 	if (sc->sc_flags & WM_F_PCIE) {
   4153 		int timeout = 800;
   4154 
   4155 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4156 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4157 
   4158 		while (timeout--) {
   4159 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4160 			    == 0)
   4161 				break;
   4162 			delay(100);
   4163 		}
   4164 		if (timeout == 0)
   4165 			device_printf(sc->sc_dev,
   4166 			    "failed to disable busmastering\n");
   4167 	}
   4168 
   4169 	/* Set the completion timeout for interface */
   4170 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4171 	    || (sc->sc_type == WM_T_82580)
   4172 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4173 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4174 		wm_set_pcie_completion_timeout(sc);
   4175 
   4176 	/* Clear interrupt */
   4177 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4178 	if (wm_is_using_msix(sc)) {
   4179 		if (sc->sc_type != WM_T_82574) {
   4180 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4181 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4182 		} else {
   4183 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4184 		}
   4185 	}
   4186 
   4187 	/* Stop the transmit and receive processes. */
   4188 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4189 	sc->sc_rctl &= ~RCTL_EN;
   4190 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4191 	CSR_WRITE_FLUSH(sc);
   4192 
   4193 	/* XXX set_tbi_sbp_82543() */
   4194 
   4195 	delay(10*1000);
   4196 
   4197 	/* Must acquire the MDIO ownership before MAC reset */
   4198 	switch (sc->sc_type) {
   4199 	case WM_T_82573:
   4200 	case WM_T_82574:
   4201 	case WM_T_82583:
   4202 		error = wm_get_hw_semaphore_82573(sc);
   4203 		break;
   4204 	default:
   4205 		break;
   4206 	}
   4207 
   4208 	/*
   4209 	 * 82541 Errata 29? & 82547 Errata 28?
   4210 	 * See also the description about PHY_RST bit in CTRL register
   4211 	 * in 8254x_GBe_SDM.pdf.
   4212 	 */
   4213 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4214 		CSR_WRITE(sc, WMREG_CTRL,
   4215 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4216 		CSR_WRITE_FLUSH(sc);
   4217 		delay(5000);
   4218 	}
   4219 
   4220 	switch (sc->sc_type) {
   4221 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4222 	case WM_T_82541:
   4223 	case WM_T_82541_2:
   4224 	case WM_T_82547:
   4225 	case WM_T_82547_2:
   4226 		/*
   4227 		 * On some chipsets, a reset through a memory-mapped write
   4228 		 * cycle can cause the chip to reset before completing the
   4229 		 * write cycle.  This causes major headache that can be
    4230 		 * write cycle.  This causes a major headache that can be
   4231 		 * through I/O space.
   4232 		 *
   4233 		 * So, if we successfully mapped the I/O BAR at attach time,
   4234 		 * use that.  Otherwise, try our luck with a memory-mapped
   4235 		 * reset.
   4236 		 */
   4237 		if (sc->sc_flags & WM_F_IOH_VALID)
   4238 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4239 		else
   4240 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4241 		break;
   4242 	case WM_T_82545_3:
   4243 	case WM_T_82546_3:
   4244 		/* Use the shadow control register on these chips. */
   4245 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4246 		break;
   4247 	case WM_T_80003:
   4248 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4249 		sc->phy.acquire(sc);
   4250 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4251 		sc->phy.release(sc);
   4252 		break;
   4253 	case WM_T_ICH8:
   4254 	case WM_T_ICH9:
   4255 	case WM_T_ICH10:
   4256 	case WM_T_PCH:
   4257 	case WM_T_PCH2:
   4258 	case WM_T_PCH_LPT:
   4259 	case WM_T_PCH_SPT:
   4260 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4261 		if (wm_phy_resetisblocked(sc) == false) {
   4262 			/*
   4263 			 * Gate automatic PHY configuration by hardware on
   4264 			 * non-managed 82579
   4265 			 */
   4266 			if ((sc->sc_type == WM_T_PCH2)
   4267 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4268 				== 0))
   4269 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4270 
   4271 			reg |= CTRL_PHY_RESET;
   4272 			phy_reset = 1;
   4273 		} else
   4274 			printf("XXX reset is blocked!!!\n");
   4275 		sc->phy.acquire(sc);
   4276 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4277 		/* Don't insert a completion barrier when reset */
   4278 		delay(20*1000);
   4279 		mutex_exit(sc->sc_ich_phymtx);
   4280 		break;
   4281 	case WM_T_82580:
   4282 	case WM_T_I350:
   4283 	case WM_T_I354:
   4284 	case WM_T_I210:
   4285 	case WM_T_I211:
   4286 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4287 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4288 			CSR_WRITE_FLUSH(sc);
   4289 		delay(5000);
   4290 		break;
   4291 	case WM_T_82542_2_0:
   4292 	case WM_T_82542_2_1:
   4293 	case WM_T_82543:
   4294 	case WM_T_82540:
   4295 	case WM_T_82545:
   4296 	case WM_T_82546:
   4297 	case WM_T_82571:
   4298 	case WM_T_82572:
   4299 	case WM_T_82573:
   4300 	case WM_T_82574:
   4301 	case WM_T_82575:
   4302 	case WM_T_82576:
   4303 	case WM_T_82583:
   4304 	default:
   4305 		/* Everything else can safely use the documented method. */
   4306 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4307 		break;
   4308 	}
   4309 
   4310 	/* Must release the MDIO ownership after MAC reset */
   4311 	switch (sc->sc_type) {
   4312 	case WM_T_82573:
   4313 	case WM_T_82574:
   4314 	case WM_T_82583:
   4315 		if (error == 0)
   4316 			wm_put_hw_semaphore_82573(sc);
   4317 		break;
   4318 	default:
   4319 		break;
   4320 	}
   4321 
   4322 	if (phy_reset != 0)
   4323 		wm_get_cfg_done(sc);
   4324 
   4325 	/* reload EEPROM */
   4326 	switch (sc->sc_type) {
   4327 	case WM_T_82542_2_0:
   4328 	case WM_T_82542_2_1:
   4329 	case WM_T_82543:
   4330 	case WM_T_82544:
   4331 		delay(10);
   4332 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4333 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4334 		CSR_WRITE_FLUSH(sc);
   4335 		delay(2000);
   4336 		break;
   4337 	case WM_T_82540:
   4338 	case WM_T_82545:
   4339 	case WM_T_82545_3:
   4340 	case WM_T_82546:
   4341 	case WM_T_82546_3:
   4342 		delay(5*1000);
   4343 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4344 		break;
   4345 	case WM_T_82541:
   4346 	case WM_T_82541_2:
   4347 	case WM_T_82547:
   4348 	case WM_T_82547_2:
   4349 		delay(20000);
   4350 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4351 		break;
   4352 	case WM_T_82571:
   4353 	case WM_T_82572:
   4354 	case WM_T_82573:
   4355 	case WM_T_82574:
   4356 	case WM_T_82583:
   4357 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4358 			delay(10);
   4359 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4360 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4361 			CSR_WRITE_FLUSH(sc);
   4362 		}
   4363 		/* check EECD_EE_AUTORD */
   4364 		wm_get_auto_rd_done(sc);
   4365 		/*
   4366 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4367 		 * is set.
   4368 		 */
   4369 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4370 		    || (sc->sc_type == WM_T_82583))
   4371 			delay(25*1000);
   4372 		break;
   4373 	case WM_T_82575:
   4374 	case WM_T_82576:
   4375 	case WM_T_82580:
   4376 	case WM_T_I350:
   4377 	case WM_T_I354:
   4378 	case WM_T_I210:
   4379 	case WM_T_I211:
   4380 	case WM_T_80003:
   4381 		/* check EECD_EE_AUTORD */
   4382 		wm_get_auto_rd_done(sc);
   4383 		break;
   4384 	case WM_T_ICH8:
   4385 	case WM_T_ICH9:
   4386 	case WM_T_ICH10:
   4387 	case WM_T_PCH:
   4388 	case WM_T_PCH2:
   4389 	case WM_T_PCH_LPT:
   4390 	case WM_T_PCH_SPT:
   4391 		break;
   4392 	default:
   4393 		panic("%s: unknown type\n", __func__);
   4394 	}
   4395 
   4396 	/* Check whether EEPROM is present or not */
   4397 	switch (sc->sc_type) {
   4398 	case WM_T_82575:
   4399 	case WM_T_82576:
   4400 	case WM_T_82580:
   4401 	case WM_T_I350:
   4402 	case WM_T_I354:
   4403 	case WM_T_ICH8:
   4404 	case WM_T_ICH9:
   4405 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4406 			/* Not found */
   4407 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4408 			if (sc->sc_type == WM_T_82575)
   4409 				wm_reset_init_script_82575(sc);
   4410 		}
   4411 		break;
   4412 	default:
   4413 		break;
   4414 	}
   4415 
   4416 	if (phy_reset != 0)
   4417 		wm_phy_post_reset(sc);
   4418 
   4419 	if ((sc->sc_type == WM_T_82580)
   4420 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4421 		/* clear global device reset status bit */
   4422 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4423 	}
   4424 
   4425 	/* Clear any pending interrupt events. */
   4426 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4427 	reg = CSR_READ(sc, WMREG_ICR);
   4428 	if (wm_is_using_msix(sc)) {
   4429 		if (sc->sc_type != WM_T_82574) {
   4430 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4431 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4432 		} else
   4433 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4434 	}
   4435 
   4436 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4437 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4438 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4439 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4440 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4441 		reg |= KABGTXD_BGSQLBIAS;
   4442 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4443 	}
   4444 
   4445 	/* reload sc_ctrl */
   4446 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4447 
   4448 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4449 		wm_set_eee_i350(sc);
   4450 
   4451 	/*
   4452 	 * For PCH, this write will make sure that any noise will be detected
   4453 	 * as a CRC error and be dropped rather than show up as a bad packet
   4454 	 * to the DMA engine
   4455 	 */
   4456 	if (sc->sc_type == WM_T_PCH)
   4457 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4458 
   4459 	if (sc->sc_type >= WM_T_82544)
   4460 		CSR_WRITE(sc, WMREG_WUC, 0);
   4461 
   4462 	wm_reset_mdicnfg_82580(sc);
   4463 
   4464 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4465 		wm_pll_workaround_i210(sc);
   4466 }
   4467 
   4468 /*
   4469  * wm_add_rxbuf:
   4470  *
    4471  *	Add a receive buffer to the indicated descriptor.
   4472  */
   4473 static int
   4474 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4475 {
   4476 	struct wm_softc *sc = rxq->rxq_sc;
   4477 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4478 	struct mbuf *m;
   4479 	int error;
   4480 
   4481 	KASSERT(mutex_owned(rxq->rxq_lock));
   4482 
   4483 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4484 	if (m == NULL)
   4485 		return ENOBUFS;
   4486 
   4487 	MCLGET(m, M_DONTWAIT);
   4488 	if ((m->m_flags & M_EXT) == 0) {
   4489 		m_freem(m);
   4490 		return ENOBUFS;
   4491 	}
   4492 
   4493 	if (rxs->rxs_mbuf != NULL)
   4494 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4495 
   4496 	rxs->rxs_mbuf = m;
   4497 
   4498 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4499 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4500 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4501 	if (error) {
   4502 		/* XXX XXX XXX */
   4503 		aprint_error_dev(sc->sc_dev,
   4504 		    "unable to load rx DMA map %d, error = %d\n",
   4505 		    idx, error);
   4506 		panic("wm_add_rxbuf");
   4507 	}
   4508 
   4509 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4510 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4511 
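         	/*
         	 * On NEWQUEUE devices the descriptor is rewritten only while
         	 * the receiver is enabled, presumably to avoid touching the
         	 * ring before wm_init() has set it up.
         	 */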
   4512 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4513 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4514 			wm_init_rxdesc(rxq, idx);
   4515 	} else
   4516 		wm_init_rxdesc(rxq, idx);
   4517 
   4518 	return 0;
   4519 }
   4520 
   4521 /*
   4522  * wm_rxdrain:
   4523  *
   4524  *	Drain the receive queue.
   4525  */
   4526 static void
   4527 wm_rxdrain(struct wm_rxqueue *rxq)
   4528 {
   4529 	struct wm_softc *sc = rxq->rxq_sc;
   4530 	struct wm_rxsoft *rxs;
   4531 	int i;
   4532 
   4533 	KASSERT(mutex_owned(rxq->rxq_lock));
   4534 
   4535 	for (i = 0; i < WM_NRXDESC; i++) {
   4536 		rxs = &rxq->rxq_soft[i];
   4537 		if (rxs->rxs_mbuf != NULL) {
   4538 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4539 			m_freem(rxs->rxs_mbuf);
   4540 			rxs->rxs_mbuf = NULL;
   4541 		}
   4542 	}
   4543 }
   4544 
   4545 
   4546 /*
   4547  * XXX copy from FreeBSD's sys/net/rss_config.c
   4548  */
   4549 /*
   4550  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4551  * effectiveness may be limited by algorithm choice and available entropy
   4552  * during the boot.
   4553  *
   4554  * XXXRW: And that we don't randomize it yet!
   4555  *
   4556  * This is the default Microsoft RSS specification key which is also
   4557  * the Chelsio T5 firmware default key.
   4558  */
   4559 #define RSS_KEYSIZE 40
   4560 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4561 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4562 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4563 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4564 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4565 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4566 };
   4567 
   4568 /*
   4569  * Caller must pass an array of size sizeof(rss_key).
   4570  *
   4571  * XXX
    4572  * As if_ixgbe may use this function, it should not be an
    4573  * if_wm-specific function.
   4574  */
   4575 static void
   4576 wm_rss_getkey(uint8_t *key)
   4577 {
   4578 
   4579 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4580 }
   4581 
/*
 * Set up registers for RSS.
 *
 * XXX VMDq is not yet supported.
 */
   4587 static void
   4588 wm_init_rss(struct wm_softc *sc)
   4589 {
   4590 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4591 	int i;
   4592 
   4593 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4594 
   4595 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4596 		int qid, reta_ent;
   4597 
   4598 		qid  = i % sc->sc_nqueues;
		switch (sc->sc_type) {
   4600 		case WM_T_82574:
   4601 			reta_ent = __SHIFTIN(qid,
   4602 			    RETA_ENT_QINDEX_MASK_82574);
   4603 			break;
   4604 		case WM_T_82575:
   4605 			reta_ent = __SHIFTIN(qid,
   4606 			    RETA_ENT_QINDEX1_MASK_82575);
   4607 			break;
   4608 		default:
   4609 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4610 			break;
   4611 		}
   4612 
   4613 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4614 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4615 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4616 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4617 	}
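
	/*
	 * With the i % sc->sc_nqueues mapping above, e.g. four queues fill
	 * the redirection table entries as 0, 1, 2, 3, 0, 1, ..., spreading
	 * RSS hash buckets evenly across the queues.
	 */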
   4618 
   4619 	wm_rss_getkey((uint8_t *)rss_key);
   4620 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4621 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4622 
   4623 	if (sc->sc_type == WM_T_82574)
   4624 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4625 	else
   4626 		mrqc = MRQC_ENABLE_RSS_MQ;
   4627 
   4628 	/*
   4629 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4630 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4631 	 */
   4632 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4633 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4634 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4635 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4636 
   4637 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4638 }
   4639 
/*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the following parameters:
 *     - The number of hardware queues
 *     - The number of MSI-X vectors (= "nvectors" argument)
 *     - ncpu
 */
   4648 static void
   4649 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4650 {
   4651 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4652 
   4653 	if (nvectors < 2) {
   4654 		sc->sc_nqueues = 1;
   4655 		return;
   4656 	}
   4657 
	switch (sc->sc_type) {
   4659 	case WM_T_82572:
   4660 		hw_ntxqueues = 2;
   4661 		hw_nrxqueues = 2;
   4662 		break;
   4663 	case WM_T_82574:
   4664 		hw_ntxqueues = 2;
   4665 		hw_nrxqueues = 2;
   4666 		break;
   4667 	case WM_T_82575:
   4668 		hw_ntxqueues = 4;
   4669 		hw_nrxqueues = 4;
   4670 		break;
   4671 	case WM_T_82576:
   4672 		hw_ntxqueues = 16;
   4673 		hw_nrxqueues = 16;
   4674 		break;
   4675 	case WM_T_82580:
   4676 	case WM_T_I350:
   4677 	case WM_T_I354:
   4678 		hw_ntxqueues = 8;
   4679 		hw_nrxqueues = 8;
   4680 		break;
   4681 	case WM_T_I210:
   4682 		hw_ntxqueues = 4;
   4683 		hw_nrxqueues = 4;
   4684 		break;
   4685 	case WM_T_I211:
   4686 		hw_ntxqueues = 2;
   4687 		hw_nrxqueues = 2;
   4688 		break;
		/*
		 * As the ethernet controllers below do not support MSI-X,
		 * this driver does not use multiqueue on them.
		 *     - WM_T_80003
		 *     - WM_T_ICH8
		 *     - WM_T_ICH9
		 *     - WM_T_ICH10
		 *     - WM_T_PCH
		 *     - WM_T_PCH2
		 *     - WM_T_PCH_LPT
		 */
   4700 	default:
   4701 		hw_ntxqueues = 1;
   4702 		hw_nrxqueues = 1;
   4703 		break;
   4704 	}
   4705 
   4706 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4707 
   4708 	/*
	 * As more queues than MSI-X vectors cannot improve scaling, we limit
	 * the number of queues actually used.
   4711 	 */
   4712 	if (nvectors < hw_nqueues + 1) {
   4713 		sc->sc_nqueues = nvectors - 1;
   4714 	} else {
   4715 		sc->sc_nqueues = hw_nqueues;
   4716 	}
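
	/*
	 * For example, an 82576 (16 hardware queues) attached with five
	 * MSI-X vectors ends up with sc_nqueues = 5 - 1 = 4 here, since
	 * one vector is reserved for the link interrupt.
	 */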
   4717 
   4718 	/*
	 * As more queues than CPUs cannot improve scaling, we limit
	 * the number of queues actually used.
   4721 	 */
   4722 	if (ncpu < sc->sc_nqueues)
   4723 		sc->sc_nqueues = ncpu;
   4724 }
   4725 
   4726 static inline bool
   4727 wm_is_using_msix(struct wm_softc *sc)
   4728 {
   4729 
   4730 	return (sc->sc_nintrs > 1);
   4731 }
   4732 
   4733 static inline bool
   4734 wm_is_using_multiqueue(struct wm_softc *sc)
   4735 {
   4736 
   4737 	return (sc->sc_nqueues > 1);
   4738 }
   4739 
   4740 static int
   4741 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4742 {
   4743 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4744 	wmq->wmq_id = qidx;
   4745 	wmq->wmq_intr_idx = intr_idx;
   4746 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4747 #ifdef WM_MPSAFE
   4748 	    | SOFTINT_MPSAFE
   4749 #endif
   4750 	    , wm_handle_queue, wmq);
   4751 	if (wmq->wmq_si != NULL)
   4752 		return 0;
   4753 
   4754 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4755 	    wmq->wmq_id);
   4756 
   4757 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4758 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4759 	return ENOMEM;
   4760 }
   4761 
   4762 /*
 * Both single-interrupt MSI and INTx can use this function.
   4764  */
   4765 static int
   4766 wm_setup_legacy(struct wm_softc *sc)
   4767 {
   4768 	pci_chipset_tag_t pc = sc->sc_pc;
   4769 	const char *intrstr = NULL;
   4770 	char intrbuf[PCI_INTRSTR_LEN];
   4771 	int error;
   4772 
   4773 	error = wm_alloc_txrx_queues(sc);
   4774 	if (error) {
   4775 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4776 		    error);
   4777 		return ENOMEM;
   4778 	}
   4779 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4780 	    sizeof(intrbuf));
   4781 #ifdef WM_MPSAFE
   4782 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4783 #endif
   4784 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4785 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4786 	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4788 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4789 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4790 		return ENOMEM;
   4791 	}
   4792 
   4793 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4794 	sc->sc_nintrs = 1;
   4795 
   4796 	return wm_softint_establish(sc, 0, 0);
   4797 }
   4798 
   4799 static int
   4800 wm_setup_msix(struct wm_softc *sc)
   4801 {
   4802 	void *vih;
   4803 	kcpuset_t *affinity;
   4804 	int qidx, error, intr_idx, txrx_established;
   4805 	pci_chipset_tag_t pc = sc->sc_pc;
   4806 	const char *intrstr = NULL;
   4807 	char intrbuf[PCI_INTRSTR_LEN];
   4808 	char intr_xname[INTRDEVNAMEBUF];
   4809 
   4810 	if (sc->sc_nqueues < ncpu) {
   4811 		/*
   4812 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
		 * interrupts starts from CPU#1.
   4814 		 */
   4815 		sc->sc_affinity_offset = 1;
   4816 	} else {
		/*
		 * In this case, this device uses all CPUs, so we unify the
		 * affinitized cpu_index with the MSI-X vector number for
		 * readability.
		 */
   4821 		sc->sc_affinity_offset = 0;
   4822 	}
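
	/*
	 * For example, with four queues on an eight-CPU system,
	 * sc_affinity_offset is 1, so the queue interrupts established
	 * below are bound round-robin to CPU#1..CPU#4, leaving CPU#0 free.
	 */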
   4823 
   4824 	error = wm_alloc_txrx_queues(sc);
   4825 	if (error) {
   4826 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4827 		    error);
   4828 		return ENOMEM;
   4829 	}
   4830 
   4831 	kcpuset_create(&affinity, false);
   4832 	intr_idx = 0;
   4833 
   4834 	/*
   4835 	 * TX and RX
   4836 	 */
   4837 	txrx_established = 0;
   4838 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4839 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4840 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4841 
   4842 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4843 		    sizeof(intrbuf));
   4844 #ifdef WM_MPSAFE
   4845 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4846 		    PCI_INTR_MPSAFE, true);
   4847 #endif
   4848 		memset(intr_xname, 0, sizeof(intr_xname));
   4849 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4850 		    device_xname(sc->sc_dev), qidx);
   4851 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4852 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4853 		if (vih == NULL) {
   4854 			aprint_error_dev(sc->sc_dev,
   4855 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4856 			    intrstr ? " at " : "",
   4857 			    intrstr ? intrstr : "");
   4858 
   4859 			goto fail;
   4860 		}
   4861 		kcpuset_zero(affinity);
   4862 		/* Round-robin affinity */
   4863 		kcpuset_set(affinity, affinity_to);
   4864 		error = interrupt_distribute(vih, affinity, NULL);
   4865 		if (error == 0) {
   4866 			aprint_normal_dev(sc->sc_dev,
   4867 			    "for TX and RX interrupting at %s affinity to %u\n",
   4868 			    intrstr, affinity_to);
   4869 		} else {
   4870 			aprint_normal_dev(sc->sc_dev,
   4871 			    "for TX and RX interrupting at %s\n", intrstr);
   4872 		}
   4873 		sc->sc_ihs[intr_idx] = vih;
   4874 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4875 			goto fail;
   4876 		txrx_established++;
   4877 		intr_idx++;
   4878 	}
   4879 
   4880 	/*
   4881 	 * LINK
   4882 	 */
   4883 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4884 	    sizeof(intrbuf));
   4885 #ifdef WM_MPSAFE
   4886 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4887 #endif
   4888 	memset(intr_xname, 0, sizeof(intr_xname));
   4889 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4890 	    device_xname(sc->sc_dev));
   4891 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4892 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4893 	if (vih == NULL) {
   4894 		aprint_error_dev(sc->sc_dev,
   4895 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4896 		    intrstr ? " at " : "",
   4897 		    intrstr ? intrstr : "");
   4898 
   4899 		goto fail;
   4900 	}
	/* Keep the default affinity for the LINK interrupt */
   4902 	aprint_normal_dev(sc->sc_dev,
   4903 	    "for LINK interrupting at %s\n", intrstr);
   4904 	sc->sc_ihs[intr_idx] = vih;
   4905 	sc->sc_link_intr_idx = intr_idx;
   4906 
   4907 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4908 	kcpuset_destroy(affinity);
   4909 	return 0;
   4910 
   4911  fail:
   4912 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4913 		struct wm_queue *wmq = &sc->sc_queue[qidx];
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4915 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4916 	}
   4917 
   4918 	kcpuset_destroy(affinity);
   4919 	return ENOMEM;
   4920 }
   4921 
   4922 static void
   4923 wm_turnon(struct wm_softc *sc)
   4924 {
   4925 	int i;
   4926 
   4927 	KASSERT(WM_CORE_LOCKED(sc));
   4928 
   4929 	/*
	 * Must unset the stopping flags in ascending order.
   4931 	 */
	for (i = 0; i < sc->sc_nqueues; i++) {
   4933 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4934 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4935 
   4936 		mutex_enter(txq->txq_lock);
   4937 		txq->txq_stopping = false;
   4938 		mutex_exit(txq->txq_lock);
   4939 
   4940 		mutex_enter(rxq->rxq_lock);
   4941 		rxq->rxq_stopping = false;
   4942 		mutex_exit(rxq->rxq_lock);
   4943 	}
   4944 
   4945 	sc->sc_core_stopping = false;
   4946 }
   4947 
   4948 static void
   4949 wm_turnoff(struct wm_softc *sc)
   4950 {
   4951 	int i;
   4952 
   4953 	KASSERT(WM_CORE_LOCKED(sc));
   4954 
   4955 	sc->sc_core_stopping = true;
   4956 
   4957 	/*
	 * Must set the stopping flags in ascending order.
   4959 	 */
	for (i = 0; i < sc->sc_nqueues; i++) {
   4961 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4962 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4963 
   4964 		mutex_enter(rxq->rxq_lock);
   4965 		rxq->rxq_stopping = true;
   4966 		mutex_exit(rxq->rxq_lock);
   4967 
   4968 		mutex_enter(txq->txq_lock);
   4969 		txq->txq_stopping = true;
   4970 		mutex_exit(txq->txq_lock);
   4971 	}
   4972 }
   4973 
   4974 /*
 * Write the interrupt interval value to the ITR or EITR register.
   4976  */
   4977 static void
   4978 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4979 {
   4980 
   4981 	if (!wmq->wmq_set_itr)
   4982 		return;
   4983 
   4984 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4985 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4986 
		/*
		 * The 82575 doesn't have the CNT_INGR field, so we
		 * overwrite the counter field in software instead.
		 */
   4991 		if (sc->sc_type == WM_T_82575)
   4992 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4993 		else
   4994 			eitr |= EITR_CNT_INGR;
   4995 
   4996 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4997 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
		/*
		 * The 82574 has both ITR and EITR. Set EITR when we use
		 * the multiqueue function with MSI-X.
		 */
   5002 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5003 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5004 	} else {
   5005 		KASSERT(wmq->wmq_id == 0);
   5006 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5007 	}
   5008 
   5009 	wmq->wmq_set_itr = false;
   5010 }
   5011 
/*
 * TODO
 * The dynamic ITR calculation below is almost the same as Linux igb's,
 * however it does not fit wm(4) well. So, AIM stays disabled until we
 * find an appropriate ITR calculation for wm(4).
 */
/*
 * Calculate the interrupt interval value that wm_itrs_writereg() will
 * write to the register. This function itself does not write the
 * ITR/EITR register.
 */
   5022 static void
   5023 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5024 {
   5025 #ifdef NOTYET
   5026 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5027 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5028 	uint32_t avg_size = 0;
   5029 	uint32_t new_itr;
   5030 
   5031 	if (rxq->rxq_packets)
		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   5033 	if (txq->txq_packets)
   5034 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5035 
   5036 	if (avg_size == 0) {
   5037 		new_itr = 450; /* restore default value */
   5038 		goto out;
   5039 	}
   5040 
   5041 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5042 	avg_size += 24;
   5043 
   5044 	/* Don't starve jumbo frames */
   5045 	avg_size = min(avg_size, 3000);
   5046 
   5047 	/* Give a little boost to mid-size frames */
   5048 	if ((avg_size > 300) && (avg_size < 1200))
   5049 		new_itr = avg_size / 3;
   5050 	else
   5051 		new_itr = avg_size / 2;
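
	/*
	 * For example, an average frame of 576 bytes becomes avg_size 600
	 * after the +24 adjustment above; 600 is mid-size, so new_itr is
	 * 600 / 3 = 200 here, then scaled to 800 below on everything
	 * except the 82575.
	 */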
   5052 
   5053 out:
   5054 	/*
	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5056 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5057 	 */
   5058 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5059 		new_itr *= 4;
   5060 
   5061 	if (new_itr != wmq->wmq_itr) {
   5062 		wmq->wmq_itr = new_itr;
   5063 		wmq->wmq_set_itr = true;
   5064 	} else
   5065 		wmq->wmq_set_itr = false;
   5066 
   5067 	rxq->rxq_packets = 0;
   5068 	rxq->rxq_bytes = 0;
   5069 	txq->txq_packets = 0;
   5070 	txq->txq_bytes = 0;
   5071 #endif
   5072 }
   5073 
   5074 /*
   5075  * wm_init:		[ifnet interface function]
   5076  *
   5077  *	Initialize the interface.
   5078  */
   5079 static int
   5080 wm_init(struct ifnet *ifp)
   5081 {
   5082 	struct wm_softc *sc = ifp->if_softc;
   5083 	int ret;
   5084 
   5085 	WM_CORE_LOCK(sc);
   5086 	ret = wm_init_locked(ifp);
   5087 	WM_CORE_UNLOCK(sc);
   5088 
   5089 	return ret;
   5090 }
   5091 
   5092 static int
   5093 wm_init_locked(struct ifnet *ifp)
   5094 {
   5095 	struct wm_softc *sc = ifp->if_softc;
   5096 	int i, j, trynum, error = 0;
   5097 	uint32_t reg;
   5098 
   5099 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5100 		device_xname(sc->sc_dev), __func__));
   5101 	KASSERT(WM_CORE_LOCKED(sc));
   5102 
   5103 	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
   5107 	 * on such platforms.  One possibility is that the DMA itself is
   5108 	 * slightly more efficient if the front of the entire packet (instead
   5109 	 * of the front of the headers) is aligned.
   5110 	 *
   5111 	 * Note we must always set align_tweak to 0 if we are using
   5112 	 * jumbo frames.
   5113 	 */
   5114 #ifdef __NO_STRICT_ALIGNMENT
   5115 	sc->sc_align_tweak = 0;
   5116 #else
   5117 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5118 		sc->sc_align_tweak = 0;
   5119 	else
   5120 		sc->sc_align_tweak = 2;
   5121 #endif /* __NO_STRICT_ALIGNMENT */
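
	/*
	 * An align_tweak of 2 shifts the buffer so that the 14-byte
	 * Ethernet header ends on a 4-byte boundary (2 + 14 = 16), leaving
	 * the IP header that follows naturally aligned.
	 */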
   5122 
   5123 	/* Cancel any pending I/O. */
   5124 	wm_stop_locked(ifp, 0);
   5125 
   5126 	/* update statistics before reset */
   5127 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5128 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5129 
   5130 	/* PCH_SPT hardware workaround */
   5131 	if (sc->sc_type == WM_T_PCH_SPT)
   5132 		wm_flush_desc_rings(sc);
   5133 
   5134 	/* Reset the chip to a known state. */
   5135 	wm_reset(sc);
   5136 
   5137 	/*
	 * AMT-based hardware can now take control from the firmware.
	 * Do this after reset.
   5140 	 */
   5141 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5142 		wm_get_hw_control(sc);
   5143 
   5144 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5145 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5146 		wm_legacy_irq_quirk_spt(sc);
   5147 
   5148 	/* Init hardware bits */
   5149 	wm_initialize_hardware_bits(sc);
   5150 
   5151 	/* Reset the PHY. */
   5152 	if (sc->sc_flags & WM_F_HAS_MII)
   5153 		wm_gmii_reset(sc);
   5154 
   5155 	/* Calculate (E)ITR value */
   5156 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
		/*
		 * For NEWQUEUE's EITR (except for the 82575).
		 * The 82575's EITR should be set to the same throttling
		 * value as other old controllers' ITR because the
		 * interrupts/sec calculation is the same, that is,
		 * 1,000,000,000 / (N * 256).
		 *
		 * The 82574's EITR should be set to the same throttling
		 * value as its ITR.
		 *
		 * For N interrupts/sec, set this value to:
		 * 1,000,000 / N, in contrast to the ITR throttling value.
		 */
   5168 		sc->sc_itr_init = 450;
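		/* 450 corresponds to roughly 1,000,000 / 450 ~= 2222 ints/sec. */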
   5169 	} else if (sc->sc_type >= WM_T_82543) {
   5170 		/*
   5171 		 * Set up the interrupt throttling register (units of 256ns)
   5172 		 * Note that a footnote in Intel's documentation says this
   5173 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5174 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5175 		 * that that is also true for the 1024ns units of the other
   5176 		 * interrupt-related timer registers -- so, really, we ought
   5177 		 * to divide this value by 4 when the link speed is low.
   5178 		 *
   5179 		 * XXX implement this division at link speed change!
   5180 		 */
   5181 
   5182 		/*
   5183 		 * For N interrupts/sec, set this value to:
   5184 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5185 		 * absolute and packet timer values to this value
   5186 		 * divided by 4 to get "simple timer" behavior.
   5187 		 */
   5188 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5189 	}
   5190 
   5191 	error = wm_init_txrx_queues(sc);
   5192 	if (error)
   5193 		goto out;
   5194 
   5195 	/*
   5196 	 * Clear out the VLAN table -- we don't use it (yet).
   5197 	 */
   5198 	CSR_WRITE(sc, WMREG_VET, 0);
   5199 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5200 		trynum = 10; /* Due to hw errata */
   5201 	else
   5202 		trynum = 1;
   5203 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5204 		for (j = 0; j < trynum; j++)
   5205 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5206 
   5207 	/*
   5208 	 * Set up flow-control parameters.
   5209 	 *
   5210 	 * XXX Values could probably stand some tuning.
   5211 	 */
   5212 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5213 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5214 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5215 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5216 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5217 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5218 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5219 	}
   5220 
   5221 	sc->sc_fcrtl = FCRTL_DFLT;
   5222 	if (sc->sc_type < WM_T_82543) {
   5223 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5224 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5225 	} else {
   5226 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5227 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5228 	}
   5229 
   5230 	if (sc->sc_type == WM_T_80003)
   5231 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5232 	else
   5233 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5234 
   5235 	/* Writes the control register. */
   5236 	wm_set_vlan(sc);
   5237 
   5238 	if (sc->sc_flags & WM_F_HAS_MII) {
   5239 		int val;
   5240 
   5241 		switch (sc->sc_type) {
   5242 		case WM_T_80003:
   5243 		case WM_T_ICH8:
   5244 		case WM_T_ICH9:
   5245 		case WM_T_ICH10:
   5246 		case WM_T_PCH:
   5247 		case WM_T_PCH2:
   5248 		case WM_T_PCH_LPT:
   5249 		case WM_T_PCH_SPT:
   5250 			/*
   5251 			 * Set the mac to wait the maximum time between each
   5252 			 * iteration and increase the max iterations when
   5253 			 * polling the phy; this fixes erroneous timeouts at
   5254 			 * 10Mbps.
   5255 			 */
   5256 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5257 			    0xFFFF);
   5258 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5259 			val |= 0x3F;
   5260 			wm_kmrn_writereg(sc,
   5261 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5262 			break;
   5263 		default:
   5264 			break;
   5265 		}
   5266 
   5267 		if (sc->sc_type == WM_T_80003) {
   5268 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5269 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5270 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5271 
   5272 			/* Bypass RX and TX FIFO's */
   5273 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5274 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5275 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5276 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5277 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5278 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5279 		}
   5280 	}
   5281 #if 0
   5282 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5283 #endif
   5284 
   5285 	/* Set up checksum offload parameters. */
   5286 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5287 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5288 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5289 		reg |= RXCSUM_IPOFL;
   5290 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5291 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5292 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5293 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5294 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5295 
	/* Set MSI-X related registers */
   5297 	if (wm_is_using_msix(sc)) {
   5298 		uint32_t ivar;
   5299 		struct wm_queue *wmq;
   5300 		int qid, qintr_idx;
   5301 
   5302 		if (sc->sc_type == WM_T_82575) {
   5303 			/* Interrupt control */
   5304 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5305 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5306 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5307 
   5308 			/* TX and RX */
   5309 			for (i = 0; i < sc->sc_nqueues; i++) {
   5310 				wmq = &sc->sc_queue[i];
   5311 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5312 				    EITR_TX_QUEUE(wmq->wmq_id)
   5313 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5314 			}
   5315 			/* Link status */
   5316 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5317 			    EITR_OTHER);
   5318 		} else if (sc->sc_type == WM_T_82574) {
   5319 			/* Interrupt control */
   5320 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5321 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5322 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5323 
			/*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * When wm_initialize_hardware_bits() runs, sc_nintrs
			 * has not been initialized yet, so re-initialize
			 * WMREG_RFCTL here.
			 */
   5330 			reg = CSR_READ(sc, WMREG_RFCTL);
   5331 			reg |= WMREG_RFCTL_ACKDIS;
   5332 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5333 
   5334 			ivar = 0;
   5335 			/* TX and RX */
   5336 			for (i = 0; i < sc->sc_nqueues; i++) {
   5337 				wmq = &sc->sc_queue[i];
   5338 				qid = wmq->wmq_id;
   5339 				qintr_idx = wmq->wmq_intr_idx;
   5340 
   5341 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5342 				    IVAR_TX_MASK_Q_82574(qid));
   5343 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5344 				    IVAR_RX_MASK_Q_82574(qid));
   5345 			}
   5346 			/* Link status */
   5347 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5348 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5349 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5350 		} else {
   5351 			/* Interrupt control */
   5352 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5353 			    | GPIE_EIAME | GPIE_PBA);
   5354 
   5355 			switch (sc->sc_type) {
   5356 			case WM_T_82580:
   5357 			case WM_T_I350:
   5358 			case WM_T_I354:
   5359 			case WM_T_I210:
   5360 			case WM_T_I211:
   5361 				/* TX and RX */
   5362 				for (i = 0; i < sc->sc_nqueues; i++) {
   5363 					wmq = &sc->sc_queue[i];
   5364 					qid = wmq->wmq_id;
   5365 					qintr_idx = wmq->wmq_intr_idx;
   5366 
   5367 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5368 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5369 					ivar |= __SHIFTIN((qintr_idx
   5370 						| IVAR_VALID),
   5371 					    IVAR_TX_MASK_Q(qid));
   5372 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5373 					ivar |= __SHIFTIN((qintr_idx
   5374 						| IVAR_VALID),
   5375 					    IVAR_RX_MASK_Q(qid));
   5376 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5377 				}
   5378 				break;
   5379 			case WM_T_82576:
   5380 				/* TX and RX */
   5381 				for (i = 0; i < sc->sc_nqueues; i++) {
   5382 					wmq = &sc->sc_queue[i];
   5383 					qid = wmq->wmq_id;
   5384 					qintr_idx = wmq->wmq_intr_idx;
   5385 
   5386 					ivar = CSR_READ(sc,
   5387 					    WMREG_IVAR_Q_82576(qid));
   5388 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5389 					ivar |= __SHIFTIN((qintr_idx
   5390 						| IVAR_VALID),
   5391 					    IVAR_TX_MASK_Q_82576(qid));
   5392 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5393 					ivar |= __SHIFTIN((qintr_idx
   5394 						| IVAR_VALID),
   5395 					    IVAR_RX_MASK_Q_82576(qid));
   5396 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5397 					    ivar);
   5398 				}
   5399 				break;
   5400 			default:
   5401 				break;
   5402 			}
   5403 
   5404 			/* Link status */
   5405 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5406 			    IVAR_MISC_OTHER);
   5407 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5408 		}
   5409 
   5410 		if (wm_is_using_multiqueue(sc)) {
   5411 			wm_init_rss(sc);
   5412 
			/*
			 * NOTE: Receive Full-Packet Checksum Offload
			 * is mutually exclusive with multiqueue. However,
			 * this is not the same as the TCP/IP checksums,
			 * which still work.
			 */
   5419 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5420 			reg |= RXCSUM_PCSD;
   5421 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5422 		}
   5423 	}
   5424 
   5425 	/* Set up the interrupt registers. */
   5426 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5427 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5428 	    ICR_RXO | ICR_RXT0;
   5429 	if (wm_is_using_msix(sc)) {
   5430 		uint32_t mask;
   5431 		struct wm_queue *wmq;
   5432 
   5433 		switch (sc->sc_type) {
   5434 		case WM_T_82574:
   5435 			mask = 0;
   5436 			for (i = 0; i < sc->sc_nqueues; i++) {
   5437 				wmq = &sc->sc_queue[i];
   5438 				mask |= ICR_TXQ(wmq->wmq_id);
   5439 				mask |= ICR_RXQ(wmq->wmq_id);
   5440 			}
   5441 			mask |= ICR_OTHER;
   5442 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5443 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5444 			break;
   5445 		default:
   5446 			if (sc->sc_type == WM_T_82575) {
   5447 				mask = 0;
   5448 				for (i = 0; i < sc->sc_nqueues; i++) {
   5449 					wmq = &sc->sc_queue[i];
   5450 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5451 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5452 				}
   5453 				mask |= EITR_OTHER;
   5454 			} else {
   5455 				mask = 0;
   5456 				for (i = 0; i < sc->sc_nqueues; i++) {
   5457 					wmq = &sc->sc_queue[i];
   5458 					mask |= 1 << wmq->wmq_intr_idx;
   5459 				}
   5460 				mask |= 1 << sc->sc_link_intr_idx;
   5461 			}
   5462 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5463 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5464 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5465 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5466 			break;
   5467 		}
   5468 	} else
   5469 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5470 
   5471 	/* Set up the inter-packet gap. */
   5472 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5473 
   5474 	if (sc->sc_type >= WM_T_82543) {
   5475 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5476 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5477 			wm_itrs_writereg(sc, wmq);
   5478 		}
		/*
		 * Link interrupts occur much less frequently than TX
		 * and RX interrupts, so we don't tune the
		 * EINTR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
		 * if_igb does.
		 */
   5485 	}
   5486 
   5487 	/* Set the VLAN ethernetype. */
   5488 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5489 
   5490 	/*
   5491 	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
   5493 	 * we resolve the media type.
   5494 	 */
   5495 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5496 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5497 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5498 	if (sc->sc_type >= WM_T_82571)
   5499 		sc->sc_tctl |= TCTL_MULR;
   5500 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5501 
   5502 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set. See the documentation. */
   5504 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5505 	}
   5506 
   5507 	if (sc->sc_type == WM_T_80003) {
   5508 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5509 		reg &= ~TCTL_EXT_GCEX_MASK;
   5510 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5511 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5512 	}
   5513 
   5514 	/* Set the media. */
   5515 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5516 		goto out;
   5517 
   5518 	/* Configure for OS presence */
   5519 	wm_init_manageability(sc);
   5520 
   5521 	/*
   5522 	 * Set up the receive control register; we actually program
   5523 	 * the register when we set the receive filter.  Use multicast
   5524 	 * address offset type 0.
   5525 	 *
   5526 	 * Only the i82544 has the ability to strip the incoming
   5527 	 * CRC, so we don't enable that feature.
   5528 	 */
   5529 	sc->sc_mchash_type = 0;
   5530 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5531 	    | RCTL_MO(sc->sc_mchash_type);
   5532 
   5533 	/*
	 * The 82574 uses one-buffer extended Rx descriptors.
   5535 	 */
   5536 	if (sc->sc_type == WM_T_82574)
   5537 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5538 
   5539 	/*
   5540 	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not. So ask for a stripped CRC here and cope in rxeof.
   5542 	 */
   5543 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5544 	    || (sc->sc_type == WM_T_I210))
   5545 		sc->sc_rctl |= RCTL_SECRC;
   5546 
   5547 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5548 	    && (ifp->if_mtu > ETHERMTU)) {
   5549 		sc->sc_rctl |= RCTL_LPE;
   5550 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5551 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5552 	}
   5553 
   5554 	if (MCLBYTES == 2048) {
   5555 		sc->sc_rctl |= RCTL_2k;
   5556 	} else {
   5557 		if (sc->sc_type >= WM_T_82543) {
   5558 			switch (MCLBYTES) {
   5559 			case 4096:
   5560 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5561 				break;
   5562 			case 8192:
   5563 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5564 				break;
   5565 			case 16384:
   5566 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5567 				break;
   5568 			default:
   5569 				panic("wm_init: MCLBYTES %d unsupported",
   5570 				    MCLBYTES);
   5571 				break;
   5572 			}
   5573 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5574 	}
   5575 
   5576 	/* Enable ECC */
   5577 	switch (sc->sc_type) {
   5578 	case WM_T_82571:
   5579 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5580 		reg |= PBA_ECC_CORR_EN;
   5581 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5582 		break;
   5583 	case WM_T_PCH_LPT:
   5584 	case WM_T_PCH_SPT:
   5585 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5586 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5587 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5588 
   5589 		sc->sc_ctrl |= CTRL_MEHE;
   5590 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5591 		break;
   5592 	default:
   5593 		break;
   5594 	}
   5595 
	/* On the 82575 and later, set RDT only if RX is enabled */
   5597 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5598 		int qidx;
   5599 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5600 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5601 			for (i = 0; i < WM_NRXDESC; i++) {
   5602 				mutex_enter(rxq->rxq_lock);
   5603 				wm_init_rxdesc(rxq, i);
   5604 				mutex_exit(rxq->rxq_lock);
   5605 
   5606 			}
   5607 		}
   5608 	}
   5609 
   5610 	/* Set the receive filter. */
   5611 	wm_set_filter(sc);
   5612 
   5613 	wm_turnon(sc);
   5614 
   5615 	/* Start the one second link check clock. */
   5616 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5617 
   5618 	/* ...all done! */
   5619 	ifp->if_flags |= IFF_RUNNING;
   5620 	ifp->if_flags &= ~IFF_OACTIVE;
   5621 
   5622  out:
   5623 	sc->sc_if_flags = ifp->if_flags;
   5624 	if (error)
   5625 		log(LOG_ERR, "%s: interface not running\n",
   5626 		    device_xname(sc->sc_dev));
   5627 	return error;
   5628 }
   5629 
   5630 /*
   5631  * wm_stop:		[ifnet interface function]
   5632  *
   5633  *	Stop transmission on the interface.
   5634  */
   5635 static void
   5636 wm_stop(struct ifnet *ifp, int disable)
   5637 {
   5638 	struct wm_softc *sc = ifp->if_softc;
   5639 
   5640 	WM_CORE_LOCK(sc);
   5641 	wm_stop_locked(ifp, disable);
   5642 	WM_CORE_UNLOCK(sc);
   5643 }
   5644 
   5645 static void
   5646 wm_stop_locked(struct ifnet *ifp, int disable)
   5647 {
   5648 	struct wm_softc *sc = ifp->if_softc;
   5649 	struct wm_txsoft *txs;
   5650 	int i, qidx;
   5651 
   5652 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5653 		device_xname(sc->sc_dev), __func__));
   5654 	KASSERT(WM_CORE_LOCKED(sc));
   5655 
   5656 	wm_turnoff(sc);
   5657 
   5658 	/* Stop the one second clock. */
   5659 	callout_stop(&sc->sc_tick_ch);
   5660 
   5661 	/* Stop the 82547 Tx FIFO stall check timer. */
   5662 	if (sc->sc_type == WM_T_82547)
   5663 		callout_stop(&sc->sc_txfifo_ch);
   5664 
   5665 	if (sc->sc_flags & WM_F_HAS_MII) {
   5666 		/* Down the MII. */
   5667 		mii_down(&sc->sc_mii);
   5668 	} else {
   5669 #if 0
   5670 		/* Should we clear PHY's status properly? */
   5671 		wm_reset(sc);
   5672 #endif
   5673 	}
   5674 
   5675 	/* Stop the transmit and receive processes. */
   5676 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5677 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5678 	sc->sc_rctl &= ~RCTL_EN;
   5679 
   5680 	/*
   5681 	 * Clear the interrupt mask to ensure the device cannot assert its
   5682 	 * interrupt line.
   5683 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5684 	 * service any currently pending or shared interrupt.
   5685 	 */
   5686 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5687 	sc->sc_icr = 0;
   5688 	if (wm_is_using_msix(sc)) {
   5689 		if (sc->sc_type != WM_T_82574) {
   5690 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5691 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5692 		} else
   5693 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5694 	}
   5695 
   5696 	/* Release any queued transmit buffers. */
   5697 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5698 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5699 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5700 		mutex_enter(txq->txq_lock);
   5701 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5702 			txs = &txq->txq_soft[i];
   5703 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5705 				m_freem(txs->txs_mbuf);
   5706 				txs->txs_mbuf = NULL;
   5707 			}
   5708 		}
   5709 		mutex_exit(txq->txq_lock);
   5710 	}
   5711 
   5712 	/* Mark the interface as down and cancel the watchdog timer. */
   5713 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5714 	ifp->if_timer = 0;
   5715 
   5716 	if (disable) {
   5717 		for (i = 0; i < sc->sc_nqueues; i++) {
   5718 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5719 			mutex_enter(rxq->rxq_lock);
   5720 			wm_rxdrain(rxq);
   5721 			mutex_exit(rxq->rxq_lock);
   5722 		}
   5723 	}
   5724 
   5725 #if 0 /* notyet */
   5726 	if (sc->sc_type >= WM_T_82544)
   5727 		CSR_WRITE(sc, WMREG_WUC, 0);
   5728 #endif
   5729 }
   5730 
   5731 static void
   5732 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5733 {
   5734 	struct mbuf *m;
   5735 	int i;
   5736 
   5737 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5738 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5739 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5740 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5741 		    m->m_data, m->m_len, m->m_flags);
   5742 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5743 	    i, i == 1 ? "" : "s");
   5744 }
   5745 
   5746 /*
   5747  * wm_82547_txfifo_stall:
   5748  *
   5749  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5750  *	reset the FIFO pointers, and restart packet transmission.
   5751  */
   5752 static void
   5753 wm_82547_txfifo_stall(void *arg)
   5754 {
   5755 	struct wm_softc *sc = arg;
   5756 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5757 
   5758 	mutex_enter(txq->txq_lock);
   5759 
   5760 	if (txq->txq_stopping)
   5761 		goto out;
   5762 
   5763 	if (txq->txq_fifo_stall) {
   5764 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5765 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5766 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5767 			/*
   5768 			 * Packets have drained.  Stop transmitter, reset
   5769 			 * FIFO pointers, restart transmitter, and kick
   5770 			 * the packet queue.
   5771 			 */
   5772 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5773 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5774 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5775 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5776 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5777 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5778 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5779 			CSR_WRITE_FLUSH(sc);
   5780 
   5781 			txq->txq_fifo_head = 0;
   5782 			txq->txq_fifo_stall = 0;
   5783 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5784 		} else {
   5785 			/*
   5786 			 * Still waiting for packets to drain; try again in
   5787 			 * another tick.
   5788 			 */
   5789 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5790 		}
   5791 	}
   5792 
   5793 out:
   5794 	mutex_exit(txq->txq_lock);
   5795 }
   5796 
   5797 /*
   5798  * wm_82547_txfifo_bugchk:
   5799  *
 *	Check for the bug condition in the 82547 Tx FIFO.  We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5803  *
   5804  *	We do this by checking the amount of space before the end
   5805  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5806  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5807  *	the internal FIFO pointers to the beginning, and restart
   5808  *	transmission on the interface.
   5809  */
   5810 #define	WM_FIFO_HDR		0x10
   5811 #define	WM_82547_PAD_LEN	0x3e0
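/*
 * Illustration with hypothetical numbers: if txq_fifo_size is 0x1000 and
 * txq_fifo_head is 0xf00, then space is 0x100, and a packet whose rounded
 * length is 0x500 stalls, since 0x500 >= WM_82547_PAD_LEN + 0x100 (0x4e0).
 */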
   5812 static int
   5813 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5814 {
   5815 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5816 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5817 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5818 
   5819 	/* Just return if already stalled. */
   5820 	if (txq->txq_fifo_stall)
   5821 		return 1;
   5822 
   5823 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5824 		/* Stall only occurs in half-duplex mode. */
   5825 		goto send_packet;
   5826 	}
   5827 
   5828 	if (len >= WM_82547_PAD_LEN + space) {
   5829 		txq->txq_fifo_stall = 1;
   5830 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5831 		return 1;
   5832 	}
   5833 
   5834  send_packet:
   5835 	txq->txq_fifo_head += len;
   5836 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5837 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5838 
   5839 	return 0;
   5840 }
   5841 
   5842 static int
   5843 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5844 {
   5845 	int error;
   5846 
   5847 	/*
   5848 	 * Allocate the control data structures, and create and load the
   5849 	 * DMA map for it.
   5850 	 *
   5851 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5852 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5853 	 * both sets within the same 4G segment.
   5854 	 */
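	/*
	 * The 4 GB boundary argument passed to bus_dmamem_alloc() below is
	 * what keeps each allocation within a single 4G segment.
	 */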
   5855 	if (sc->sc_type < WM_T_82544)
   5856 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5857 	else
   5858 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5859 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5860 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5861 	else
   5862 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5863 
   5864 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5865 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5866 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5867 		aprint_error_dev(sc->sc_dev,
   5868 		    "unable to allocate TX control data, error = %d\n",
   5869 		    error);
   5870 		goto fail_0;
   5871 	}
   5872 
   5873 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5874 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5875 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5876 		aprint_error_dev(sc->sc_dev,
   5877 		    "unable to map TX control data, error = %d\n", error);
   5878 		goto fail_1;
   5879 	}
   5880 
   5881 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5882 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5883 		aprint_error_dev(sc->sc_dev,
   5884 		    "unable to create TX control data DMA map, error = %d\n",
   5885 		    error);
   5886 		goto fail_2;
   5887 	}
   5888 
   5889 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5890 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5891 		aprint_error_dev(sc->sc_dev,
   5892 		    "unable to load TX control data DMA map, error = %d\n",
   5893 		    error);
   5894 		goto fail_3;
   5895 	}
   5896 
   5897 	return 0;
   5898 
   5899  fail_3:
   5900 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5901  fail_2:
   5902 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5903 	    WM_TXDESCS_SIZE(txq));
   5904  fail_1:
   5905 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5906  fail_0:
   5907 	return error;
   5908 }
   5909 
   5910 static void
   5911 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5912 {
   5913 
   5914 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5915 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5916 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5917 	    WM_TXDESCS_SIZE(txq));
   5918 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5919 }
   5920 
   5921 static int
   5922 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5923 {
   5924 	int error;
   5925 	size_t rxq_descs_size;
   5926 
   5927 	/*
   5928 	 * Allocate the control data structures, and create and load the
   5929 	 * DMA map for it.
   5930 	 *
   5931 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5932 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5933 	 * both sets within the same 4G segment.
   5934 	 */
   5935 	rxq->rxq_ndesc = WM_NRXDESC;
   5936 	if (sc->sc_type == WM_T_82574)
   5937 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5938 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5939 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5940 	else
   5941 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5942 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5943 
   5944 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5945 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5946 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5947 		aprint_error_dev(sc->sc_dev,
   5948 		    "unable to allocate RX control data, error = %d\n",
   5949 		    error);
   5950 		goto fail_0;
   5951 	}
   5952 
   5953 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5954 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5955 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5956 		aprint_error_dev(sc->sc_dev,
   5957 		    "unable to map RX control data, error = %d\n", error);
   5958 		goto fail_1;
   5959 	}
   5960 
   5961 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5962 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5963 		aprint_error_dev(sc->sc_dev,
   5964 		    "unable to create RX control data DMA map, error = %d\n",
   5965 		    error);
   5966 		goto fail_2;
   5967 	}
   5968 
   5969 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5970 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5971 		aprint_error_dev(sc->sc_dev,
   5972 		    "unable to load RX control data DMA map, error = %d\n",
   5973 		    error);
   5974 		goto fail_3;
   5975 	}
   5976 
   5977 	return 0;
   5978 
   5979  fail_3:
   5980 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5981  fail_2:
   5982 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5983 	    rxq_descs_size);
   5984  fail_1:
   5985 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5986  fail_0:
   5987 	return error;
   5988 }
   5989 
   5990 static void
   5991 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5992 {
   5993 
   5994 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5995 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5996 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5997 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5998 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5999 }
   6000 
   6001 
   6002 static int
   6003 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6004 {
   6005 	int i, error;
   6006 
   6007 	/* Create the transmit buffer DMA maps. */
   6008 	WM_TXQUEUELEN(txq) =
   6009 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6010 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6011 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6012 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6013 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6014 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6015 			aprint_error_dev(sc->sc_dev,
   6016 			    "unable to create Tx DMA map %d, error = %d\n",
   6017 			    i, error);
   6018 			goto fail;
   6019 		}
   6020 	}
   6021 
   6022 	return 0;
   6023 
   6024  fail:
   6025 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6026 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6027 			bus_dmamap_destroy(sc->sc_dmat,
   6028 			    txq->txq_soft[i].txs_dmamap);
   6029 	}
   6030 	return error;
   6031 }
   6032 
   6033 static void
   6034 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6035 {
   6036 	int i;
   6037 
   6038 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6039 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6040 			bus_dmamap_destroy(sc->sc_dmat,
   6041 			    txq->txq_soft[i].txs_dmamap);
   6042 	}
   6043 }
   6044 
   6045 static int
   6046 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6047 {
   6048 	int i, error;
   6049 
   6050 	/* Create the receive buffer DMA maps. */
   6051 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6052 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6053 			    MCLBYTES, 0, 0,
   6054 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6055 			aprint_error_dev(sc->sc_dev,
   6056 			    "unable to create Rx DMA map %d error = %d\n",
   6057 			    i, error);
   6058 			goto fail;
   6059 		}
   6060 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6061 	}
   6062 
   6063 	return 0;
   6064 
   6065  fail:
   6066 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6067 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6068 			bus_dmamap_destroy(sc->sc_dmat,
   6069 			    rxq->rxq_soft[i].rxs_dmamap);
   6070 	}
   6071 	return error;
   6072 }
   6073 
   6074 static void
   6075 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6076 {
   6077 	int i;
   6078 
   6079 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6080 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6081 			bus_dmamap_destroy(sc->sc_dmat,
   6082 			    rxq->rxq_soft[i].rxs_dmamap);
   6083 	}
   6084 }
   6085 
   6086 /*
 * wm_alloc_txrx_queues:
 *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6089  */
   6090 static int
   6091 wm_alloc_txrx_queues(struct wm_softc *sc)
   6092 {
   6093 	int i, error, tx_done, rx_done;
   6094 
   6095 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6096 	    KM_SLEEP);
   6097 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6099 		error = ENOMEM;
   6100 		goto fail_0;
   6101 	}
   6102 
   6103 	/*
   6104 	 * For transmission
   6105 	 */
   6106 	error = 0;
   6107 	tx_done = 0;
   6108 	for (i = 0; i < sc->sc_nqueues; i++) {
   6109 #ifdef WM_EVENT_COUNTERS
   6110 		int j;
   6111 		const char *xname;
   6112 #endif
   6113 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6114 		txq->txq_sc = sc;
   6115 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6116 
   6117 		error = wm_alloc_tx_descs(sc, txq);
   6118 		if (error)
   6119 			break;
   6120 		error = wm_alloc_tx_buffer(sc, txq);
   6121 		if (error) {
   6122 			wm_free_tx_descs(sc, txq);
   6123 			break;
   6124 		}
   6125 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6126 		if (txq->txq_interq == NULL) {
   6127 			wm_free_tx_descs(sc, txq);
   6128 			wm_free_tx_buffer(sc, txq);
   6129 			error = ENOMEM;
   6130 			break;
   6131 		}
   6132 
   6133 #ifdef WM_EVENT_COUNTERS
   6134 		xname = device_xname(sc->sc_dev);
   6135 
   6136 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6137 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6138 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6139 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6140 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6141 
   6142 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6143 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6144 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6145 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6146 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6147 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6148 
   6149 		for (j = 0; j < WM_NTXSEGS; j++) {
   6150 			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
   6154 		}
   6155 
   6156 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6157 
   6158 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6159 #endif /* WM_EVENT_COUNTERS */
   6160 
   6161 		tx_done++;
   6162 	}
   6163 	if (error)
   6164 		goto fail_1;
   6165 
   6166 	/*
	 * For receive
   6168 	 */
   6169 	error = 0;
   6170 	rx_done = 0;
   6171 	for (i = 0; i < sc->sc_nqueues; i++) {
   6172 #ifdef WM_EVENT_COUNTERS
   6173 		const char *xname;
   6174 #endif
   6175 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6176 		rxq->rxq_sc = sc;
   6177 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6178 
   6179 		error = wm_alloc_rx_descs(sc, rxq);
   6180 		if (error)
   6181 			break;
   6182 
   6183 		error = wm_alloc_rx_buffer(sc, rxq);
   6184 		if (error) {
   6185 			wm_free_rx_descs(sc, rxq);
   6186 			break;
   6187 		}
   6188 
   6189 #ifdef WM_EVENT_COUNTERS
   6190 		xname = device_xname(sc->sc_dev);
   6191 
   6192 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6193 
   6194 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6195 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6196 #endif /* WM_EVENT_COUNTERS */
   6197 
   6198 		rx_done++;
   6199 	}
   6200 	if (error)
   6201 		goto fail_2;
   6202 
   6203 	return 0;
   6204 
   6205  fail_2:
   6206 	for (i = 0; i < rx_done; i++) {
   6207 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6208 		wm_free_rx_buffer(sc, rxq);
   6209 		wm_free_rx_descs(sc, rxq);
   6210 		if (rxq->rxq_lock)
   6211 			mutex_obj_free(rxq->rxq_lock);
   6212 	}
   6213  fail_1:
   6214 	for (i = 0; i < tx_done; i++) {
   6215 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6216 		pcq_destroy(txq->txq_interq);
   6217 		wm_free_tx_buffer(sc, txq);
   6218 		wm_free_tx_descs(sc, txq);
   6219 		if (txq->txq_lock)
   6220 			mutex_obj_free(txq->txq_lock);
   6221 	}
   6222 
   6223 	kmem_free(sc->sc_queue,
   6224 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6225  fail_0:
   6226 	return error;
   6227 }
   6228 
   6229 /*
 * wm_free_txrx_queues:
 *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6232  */
   6233 static void
   6234 wm_free_txrx_queues(struct wm_softc *sc)
   6235 {
   6236 	int i;
   6237 
   6238 	for (i = 0; i < sc->sc_nqueues; i++) {
   6239 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6240 
   6241 #ifdef WM_EVENT_COUNTERS
   6242 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6243 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6244 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6245 #endif /* WM_EVENT_COUNTERS */
   6246 
   6247 		wm_free_rx_buffer(sc, rxq);
   6248 		wm_free_rx_descs(sc, rxq);
   6249 		if (rxq->rxq_lock)
   6250 			mutex_obj_free(rxq->rxq_lock);
   6251 	}
   6252 
   6253 	for (i = 0; i < sc->sc_nqueues; i++) {
   6254 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6255 		struct mbuf *m;
   6256 #ifdef WM_EVENT_COUNTERS
   6257 		int j;
   6258 
   6259 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6260 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6261 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6262 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6263 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6264 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6265 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6266 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6267 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6268 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6269 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6270 
   6271 		for (j = 0; j < WM_NTXSEGS; j++)
   6272 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6273 
   6274 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6275 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6276 #endif /* WM_EVENT_COUNTERS */
   6277 
   6278 		/* drain txq_interq */
   6279 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6280 			m_freem(m);
   6281 		pcq_destroy(txq->txq_interq);
   6282 
   6283 		wm_free_tx_buffer(sc, txq);
   6284 		wm_free_tx_descs(sc, txq);
   6285 		if (txq->txq_lock)
   6286 			mutex_obj_free(txq->txq_lock);
   6287 	}
   6288 
   6289 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6290 }
   6291 
   6292 static void
   6293 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6294 {
   6295 
   6296 	KASSERT(mutex_owned(txq->txq_lock));
   6297 
   6298 	/* Initialize the transmit descriptor ring. */
   6299 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6300 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6301 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6302 	txq->txq_free = WM_NTXDESC(txq);
   6303 	txq->txq_next = 0;
   6304 }
   6305 
   6306 static void
   6307 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6308     struct wm_txqueue *txq)
   6309 {
   6310 
   6311 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6312 		device_xname(sc->sc_dev), __func__));
   6313 	KASSERT(mutex_owned(txq->txq_lock));
   6314 
   6315 	if (sc->sc_type < WM_T_82543) {
   6316 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6317 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6318 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6319 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6320 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6321 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6322 	} else {
   6323 		int qid = wmq->wmq_id;
   6324 
   6325 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6326 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6327 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6328 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6329 
   6330 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6331 			/*
   6332 			 * Don't write TDT before TCTL.EN is set.
6333 			 * See the datasheet.
   6334 			 */
   6335 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6336 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6337 			    | TXDCTL_WTHRESH(0));
   6338 		else {
   6339 			/* XXX should update with AIM? */
   6340 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6341 			if (sc->sc_type >= WM_T_82540) {
6342 				/* TADV should be kept the same as TIDV. */
   6343 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6344 			}
   6345 
   6346 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6347 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6348 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6349 		}
   6350 	}
   6351 }
   6352 
   6353 static void
   6354 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6355 {
   6356 	int i;
   6357 
   6358 	KASSERT(mutex_owned(txq->txq_lock));
   6359 
   6360 	/* Initialize the transmit job descriptors. */
   6361 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6362 		txq->txq_soft[i].txs_mbuf = NULL;
   6363 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6364 	txq->txq_snext = 0;
   6365 	txq->txq_sdirty = 0;
   6366 }
   6367 
   6368 static void
   6369 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6370     struct wm_txqueue *txq)
   6371 {
   6372 
   6373 	KASSERT(mutex_owned(txq->txq_lock));
   6374 
   6375 	/*
   6376 	 * Set up some register offsets that are different between
   6377 	 * the i82542 and the i82543 and later chips.
   6378 	 */
   6379 	if (sc->sc_type < WM_T_82543)
   6380 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6381 	else
   6382 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6383 
   6384 	wm_init_tx_descs(sc, txq);
   6385 	wm_init_tx_regs(sc, wmq, txq);
   6386 	wm_init_tx_buffer(sc, txq);
   6387 }
   6388 
   6389 static void
   6390 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6391     struct wm_rxqueue *rxq)
   6392 {
   6393 
   6394 	KASSERT(mutex_owned(rxq->rxq_lock));
   6395 
   6396 	/*
   6397 	 * Initialize the receive descriptor and receive job
   6398 	 * descriptor rings.
   6399 	 */
   6400 	if (sc->sc_type < WM_T_82543) {
   6401 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6402 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6403 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6404 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6405 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6406 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6407 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6408 
   6409 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6410 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6411 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6412 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6413 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6414 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6415 	} else {
   6416 		int qid = wmq->wmq_id;
   6417 
   6418 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6419 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
6420 		CSR_WRITE(sc, WMREG_RDLEN(qid),
		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6421 
   6422 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
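			/*
			 * Note: SRRCTL.BSIZEPKT is expressed in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES
			 * must be a multiple of that unit for the check
			 * below to pass.
			 */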
   6423 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
6424 				panic("%s: MCLBYTES %d unsupported for 82575 "
				    "or higher\n", __func__, MCLBYTES);
   6425 
6426 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6427 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6428 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6429 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6430 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6431 			    | RXDCTL_WTHRESH(1));
   6432 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6433 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6434 		} else {
   6435 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6436 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6437 			/* XXX should update with AIM? */
   6438 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
6439 			/* RADV MUST be set to the same value as RDTR. */
   6440 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6441 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6442 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6443 		}
   6444 	}
   6445 }
   6446 
   6447 static int
   6448 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6449 {
   6450 	struct wm_rxsoft *rxs;
   6451 	int error, i;
   6452 
   6453 	KASSERT(mutex_owned(rxq->rxq_lock));
   6454 
   6455 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6456 		rxs = &rxq->rxq_soft[i];
   6457 		if (rxs->rxs_mbuf == NULL) {
   6458 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6459 				log(LOG_ERR, "%s: unable to allocate or map "
   6460 				    "rx buffer %d, error = %d\n",
   6461 				    device_xname(sc->sc_dev), i, error);
   6462 				/*
   6463 				 * XXX Should attempt to run with fewer receive
   6464 				 * XXX buffers instead of just failing.
   6465 				 */
   6466 				wm_rxdrain(rxq);
   6467 				return ENOMEM;
   6468 			}
   6469 		} else {
   6470 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6471 				wm_init_rxdesc(rxq, i);
   6472 			/*
6473 			 * For 82575 and newer devices, the RX descriptors
6474 			 * must be initialized after RCTL.EN is set in
6475 			 * wm_set_filter().
   6476 			 */
   6477 		}
   6478 	}
   6479 	rxq->rxq_ptr = 0;
   6480 	rxq->rxq_discard = 0;
   6481 	WM_RXCHAIN_RESET(rxq);
   6482 
   6483 	return 0;
   6484 }
   6485 
   6486 static int
   6487 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6488     struct wm_rxqueue *rxq)
   6489 {
   6490 
   6491 	KASSERT(mutex_owned(rxq->rxq_lock));
   6492 
   6493 	/*
   6494 	 * Set up some register offsets that are different between
   6495 	 * the i82542 and the i82543 and later chips.
   6496 	 */
   6497 	if (sc->sc_type < WM_T_82543)
   6498 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6499 	else
   6500 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6501 
   6502 	wm_init_rx_regs(sc, wmq, rxq);
   6503 	return wm_init_rx_buffer(sc, rxq);
   6504 }
   6505 
   6506 /*
6507  * wm_init_txrx_queues:
   6508  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6509  */
   6510 static int
   6511 wm_init_txrx_queues(struct wm_softc *sc)
   6512 {
   6513 	int i, error = 0;
   6514 
   6515 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6516 		device_xname(sc->sc_dev), __func__));
   6517 
   6518 	for (i = 0; i < sc->sc_nqueues; i++) {
   6519 		struct wm_queue *wmq = &sc->sc_queue[i];
   6520 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6521 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6522 
   6523 		/*
   6524 		 * TODO
6525 		 * Currently, a constant value is used instead of AIM
6526 		 * (Adaptive Interrupt Moderation).  The interrupt interval
6527 		 * for multiqueue in polling mode is shorter than the
6528 		 * default value.  More tuning and AIM support are required.
   6529 		 */
   6530 		if (wm_is_using_multiqueue(sc))
   6531 			wmq->wmq_itr = 50;
   6532 		else
   6533 			wmq->wmq_itr = sc->sc_itr_init;
   6534 		wmq->wmq_set_itr = true;
   6535 
   6536 		mutex_enter(txq->txq_lock);
   6537 		wm_init_tx_queue(sc, wmq, txq);
   6538 		mutex_exit(txq->txq_lock);
   6539 
   6540 		mutex_enter(rxq->rxq_lock);
   6541 		error = wm_init_rx_queue(sc, wmq, rxq);
   6542 		mutex_exit(rxq->rxq_lock);
   6543 		if (error)
   6544 			break;
   6545 	}
   6546 
   6547 	return error;
   6548 }
   6549 
   6550 /*
   6551  * wm_tx_offload:
   6552  *
   6553  *	Set up TCP/IP checksumming parameters for the
   6554  *	specified packet.
   6555  */
   6556 static int
   6557 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6558     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6559 {
   6560 	struct mbuf *m0 = txs->txs_mbuf;
   6561 	struct livengood_tcpip_ctxdesc *t;
   6562 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6563 	uint32_t ipcse;
   6564 	struct ether_header *eh;
   6565 	int offset, iphl;
   6566 	uint8_t fields;
   6567 
   6568 	/*
   6569 	 * XXX It would be nice if the mbuf pkthdr had offset
   6570 	 * fields for the protocol headers.
   6571 	 */
   6572 
   6573 	eh = mtod(m0, struct ether_header *);
   6574 	switch (htons(eh->ether_type)) {
   6575 	case ETHERTYPE_IP:
   6576 	case ETHERTYPE_IPV6:
   6577 		offset = ETHER_HDR_LEN;
   6578 		break;
   6579 
   6580 	case ETHERTYPE_VLAN:
   6581 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6582 		break;
   6583 
   6584 	default:
   6585 		/*
   6586 		 * Don't support this protocol or encapsulation.
   6587 		 */
   6588 		*fieldsp = 0;
   6589 		*cmdp = 0;
   6590 		return 0;
   6591 	}
   6592 
   6593 	if ((m0->m_pkthdr.csum_flags &
   6594 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6595 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6596 	} else {
   6597 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6598 	}
   6599 	ipcse = offset + iphl - 1;
   6600 
   6601 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6602 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6603 	seg = 0;
   6604 	fields = 0;
   6605 
   6606 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6607 		int hlen = offset + iphl;
   6608 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6609 
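		/*
		 * For TSO, the TCP checksum field must be seeded with the
		 * pseudo-header checksum computed with a zero length, so
		 * that the hardware can fill in the final checksum for
		 * each generated segment.  The length fields
		 * (ip_len/ip6_plen) are zeroed here for the same reason.
		 */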
   6610 		if (__predict_false(m0->m_len <
   6611 				    (hlen + sizeof(struct tcphdr)))) {
   6612 			/*
   6613 			 * TCP/IP headers are not in the first mbuf; we need
   6614 			 * to do this the slow and painful way.  Let's just
   6615 			 * hope this doesn't happen very often.
   6616 			 */
   6617 			struct tcphdr th;
   6618 
   6619 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6620 
   6621 			m_copydata(m0, hlen, sizeof(th), &th);
   6622 			if (v4) {
   6623 				struct ip ip;
   6624 
   6625 				m_copydata(m0, offset, sizeof(ip), &ip);
   6626 				ip.ip_len = 0;
   6627 				m_copyback(m0,
   6628 				    offset + offsetof(struct ip, ip_len),
   6629 				    sizeof(ip.ip_len), &ip.ip_len);
   6630 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6631 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6632 			} else {
   6633 				struct ip6_hdr ip6;
   6634 
   6635 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6636 				ip6.ip6_plen = 0;
   6637 				m_copyback(m0,
   6638 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6639 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6640 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6641 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6642 			}
   6643 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6644 			    sizeof(th.th_sum), &th.th_sum);
   6645 
   6646 			hlen += th.th_off << 2;
   6647 		} else {
   6648 			/*
   6649 			 * TCP/IP headers are in the first mbuf; we can do
   6650 			 * this the easy way.
   6651 			 */
   6652 			struct tcphdr *th;
   6653 
   6654 			if (v4) {
   6655 				struct ip *ip =
   6656 				    (void *)(mtod(m0, char *) + offset);
   6657 				th = (void *)(mtod(m0, char *) + hlen);
   6658 
   6659 				ip->ip_len = 0;
   6660 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6661 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6662 			} else {
   6663 				struct ip6_hdr *ip6 =
   6664 				    (void *)(mtod(m0, char *) + offset);
   6665 				th = (void *)(mtod(m0, char *) + hlen);
   6666 
   6667 				ip6->ip6_plen = 0;
   6668 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6669 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6670 			}
   6671 			hlen += th->th_off << 2;
   6672 		}
   6673 
   6674 		if (v4) {
   6675 			WM_Q_EVCNT_INCR(txq, txtso);
   6676 			cmdlen |= WTX_TCPIP_CMD_IP;
   6677 		} else {
   6678 			WM_Q_EVCNT_INCR(txq, txtso6);
   6679 			ipcse = 0;
   6680 		}
   6681 		cmd |= WTX_TCPIP_CMD_TSE;
   6682 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6683 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6684 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6685 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6686 	}
   6687 
   6688 	/*
   6689 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6690 	 * offload feature, if we load the context descriptor, we
   6691 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6692 	 */
   6693 
   6694 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6695 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6696 	    WTX_TCPIP_IPCSE(ipcse);
   6697 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6698 		WM_Q_EVCNT_INCR(txq, txipsum);
   6699 		fields |= WTX_IXSM;
   6700 	}
   6701 
   6702 	offset += iphl;
   6703 
   6704 	if (m0->m_pkthdr.csum_flags &
   6705 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6706 		WM_Q_EVCNT_INCR(txq, txtusum);
   6707 		fields |= WTX_TXSM;
   6708 		tucs = WTX_TCPIP_TUCSS(offset) |
   6709 		    WTX_TCPIP_TUCSO(offset +
   6710 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6711 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6712 	} else if ((m0->m_pkthdr.csum_flags &
   6713 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6714 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6715 		fields |= WTX_TXSM;
   6716 		tucs = WTX_TCPIP_TUCSS(offset) |
   6717 		    WTX_TCPIP_TUCSO(offset +
   6718 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6719 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6720 	} else {
   6721 		/* Just initialize it to a valid TCP context. */
   6722 		tucs = WTX_TCPIP_TUCSS(offset) |
   6723 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6724 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6725 	}
   6726 
   6727 	/*
6728 	 * We don't have to write a context descriptor for every packet,
6729 	 * except on the 82574.  On the 82574, the context descriptor must
6730 	 * be written for every packet when two descriptor queues are used.
6731 	 * Writing a context descriptor for every packet adds overhead,
6732 	 * but it does not cause problems.
   6733 	 */
   6734 	/* Fill in the context descriptor. */
   6735 	t = (struct livengood_tcpip_ctxdesc *)
   6736 	    &txq->txq_descs[txq->txq_next];
   6737 	t->tcpip_ipcs = htole32(ipcs);
   6738 	t->tcpip_tucs = htole32(tucs);
   6739 	t->tcpip_cmdlen = htole32(cmdlen);
   6740 	t->tcpip_seg = htole32(seg);
   6741 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6742 
   6743 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6744 	txs->txs_ndesc++;
   6745 
   6746 	*cmdp = cmd;
   6747 	*fieldsp = fields;
   6748 
   6749 	return 0;
   6750 }
   6751 
   6752 static inline int
   6753 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6754 {
   6755 	struct wm_softc *sc = ifp->if_softc;
   6756 	u_int cpuid = cpu_index(curcpu());
   6757 
   6758 	/*
6759 	 * Currently, this is a simple distribution strategy.
6760 	 * TODO:
6761 	 * Distribute by flow ID (the RSS hash value).
   6762 	 */
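	/*
	 * Adding ncpu before subtracting sc_affinity_offset keeps the
	 * dividend non-negative (assuming sc_affinity_offset <= ncpu),
	 * so the modulo always yields a valid queue index in
	 * [0, sc_nqueues).
	 */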
6763 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6764 }
   6765 
   6766 /*
   6767  * wm_start:		[ifnet interface function]
   6768  *
   6769  *	Start packet transmission on the interface.
   6770  */
   6771 static void
   6772 wm_start(struct ifnet *ifp)
   6773 {
   6774 	struct wm_softc *sc = ifp->if_softc;
   6775 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6776 
   6777 #ifdef WM_MPSAFE
   6778 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6779 #endif
   6780 	/*
   6781 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6782 	 */
   6783 
   6784 	mutex_enter(txq->txq_lock);
   6785 	if (!txq->txq_stopping)
   6786 		wm_start_locked(ifp);
   6787 	mutex_exit(txq->txq_lock);
   6788 }
   6789 
   6790 static void
   6791 wm_start_locked(struct ifnet *ifp)
   6792 {
   6793 	struct wm_softc *sc = ifp->if_softc;
   6794 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6795 
   6796 	wm_send_common_locked(ifp, txq, false);
   6797 }
   6798 
   6799 static int
   6800 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6801 {
   6802 	int qid;
   6803 	struct wm_softc *sc = ifp->if_softc;
   6804 	struct wm_txqueue *txq;
   6805 
   6806 	qid = wm_select_txqueue(ifp, m);
   6807 	txq = &sc->sc_queue[qid].wmq_txq;
   6808 
   6809 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6810 		m_freem(m);
   6811 		WM_Q_EVCNT_INCR(txq, txdrop);
   6812 		return ENOBUFS;
   6813 	}
   6814 
   6815 	/*
6816 	 * XXX NOMPSAFE: ifp->if_data should be percpu.
   6817 	 */
   6818 	ifp->if_obytes += m->m_pkthdr.len;
   6819 	if (m->m_flags & M_MCAST)
   6820 		ifp->if_omcasts++;
   6821 
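	/*
	 * If mutex_tryenter() fails here, the packet just enqueued will
	 * be dequeued later by wm_deferred_start_locked(); see the
	 * corresponding comment in wm_nq_transmit() below.
	 */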
   6822 	if (mutex_tryenter(txq->txq_lock)) {
   6823 		if (!txq->txq_stopping)
   6824 			wm_transmit_locked(ifp, txq);
   6825 		mutex_exit(txq->txq_lock);
   6826 	}
   6827 
   6828 	return 0;
   6829 }
   6830 
   6831 static void
   6832 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6833 {
   6834 
   6835 	wm_send_common_locked(ifp, txq, true);
   6836 }
   6837 
   6838 static void
   6839 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6840     bool is_transmit)
   6841 {
   6842 	struct wm_softc *sc = ifp->if_softc;
   6843 	struct mbuf *m0;
   6844 	struct m_tag *mtag;
   6845 	struct wm_txsoft *txs;
   6846 	bus_dmamap_t dmamap;
   6847 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6848 	bus_addr_t curaddr;
   6849 	bus_size_t seglen, curlen;
   6850 	uint32_t cksumcmd;
   6851 	uint8_t cksumfields;
   6852 
   6853 	KASSERT(mutex_owned(txq->txq_lock));
   6854 
   6855 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6856 		return;
   6857 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6858 		return;
   6859 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6860 		return;
   6861 
   6862 	/* Remember the previous number of free descriptors. */
   6863 	ofree = txq->txq_free;
   6864 
   6865 	/*
   6866 	 * Loop through the send queue, setting up transmit descriptors
   6867 	 * until we drain the queue, or use up all available transmit
   6868 	 * descriptors.
   6869 	 */
   6870 	for (;;) {
   6871 		m0 = NULL;
   6872 
   6873 		/* Get a work queue entry. */
   6874 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6875 			wm_txeof(sc, txq);
   6876 			if (txq->txq_sfree == 0) {
   6877 				DPRINTF(WM_DEBUG_TX,
   6878 				    ("%s: TX: no free job descriptors\n",
   6879 					device_xname(sc->sc_dev)));
   6880 				WM_Q_EVCNT_INCR(txq, txsstall);
   6881 				break;
   6882 			}
   6883 		}
   6884 
   6885 		/* Grab a packet off the queue. */
   6886 		if (is_transmit)
   6887 			m0 = pcq_get(txq->txq_interq);
   6888 		else
   6889 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6890 		if (m0 == NULL)
   6891 			break;
   6892 
   6893 		DPRINTF(WM_DEBUG_TX,
   6894 		    ("%s: TX: have packet to transmit: %p\n",
   6895 		    device_xname(sc->sc_dev), m0));
   6896 
   6897 		txs = &txq->txq_soft[txq->txq_snext];
   6898 		dmamap = txs->txs_dmamap;
   6899 
   6900 		use_tso = (m0->m_pkthdr.csum_flags &
   6901 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6902 
   6903 		/*
   6904 		 * So says the Linux driver:
   6905 		 * The controller does a simple calculation to make sure
   6906 		 * there is enough room in the FIFO before initiating the
   6907 		 * DMA for each buffer.  The calc is:
   6908 		 *	4 = ceil(buffer len / MSS)
   6909 		 * To make sure we don't overrun the FIFO, adjust the max
   6910 		 * buffer len if the MSS drops.
   6911 		 */
   6912 		dmamap->dm_maxsegsz =
   6913 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6914 		    ? m0->m_pkthdr.segsz << 2
   6915 		    : WTX_MAX_LEN;
   6916 
   6917 		/*
   6918 		 * Load the DMA map.  If this fails, the packet either
   6919 		 * didn't fit in the allotted number of segments, or we
   6920 		 * were short on resources.  For the too-many-segments
   6921 		 * case, we simply report an error and drop the packet,
   6922 		 * since we can't sanely copy a jumbo packet to a single
   6923 		 * buffer.
   6924 		 */
   6925 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6926 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6927 		if (error) {
   6928 			if (error == EFBIG) {
   6929 				WM_Q_EVCNT_INCR(txq, txdrop);
   6930 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6931 				    "DMA segments, dropping...\n",
   6932 				    device_xname(sc->sc_dev));
   6933 				wm_dump_mbuf_chain(sc, m0);
   6934 				m_freem(m0);
   6935 				continue;
   6936 			}
6937 			/* Short on resources, just stop for now. */
   6938 			DPRINTF(WM_DEBUG_TX,
   6939 			    ("%s: TX: dmamap load failed: %d\n",
   6940 			    device_xname(sc->sc_dev), error));
   6941 			break;
   6942 		}
   6943 
   6944 		segs_needed = dmamap->dm_nsegs;
   6945 		if (use_tso) {
   6946 			/* For sentinel descriptor; see below. */
   6947 			segs_needed++;
   6948 		}
   6949 
   6950 		/*
   6951 		 * Ensure we have enough descriptors free to describe
   6952 		 * the packet.  Note, we always reserve one descriptor
   6953 		 * at the end of the ring due to the semantics of the
   6954 		 * TDT register, plus one more in the event we need
   6955 		 * to load offload context.
   6956 		 */
   6957 		if (segs_needed > txq->txq_free - 2) {
   6958 			/*
   6959 			 * Not enough free descriptors to transmit this
   6960 			 * packet.  We haven't committed anything yet,
   6961 			 * so just unload the DMA map, put the packet
   6962 			 * pack on the queue, and punt.  Notify the upper
6963 			 * back on the queue, and punt.  Notify the upper
   6964 			 */
   6965 			DPRINTF(WM_DEBUG_TX,
   6966 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6967 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6968 			    segs_needed, txq->txq_free - 1));
   6969 			if (!is_transmit)
   6970 				ifp->if_flags |= IFF_OACTIVE;
   6971 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6972 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6973 			WM_Q_EVCNT_INCR(txq, txdstall);
   6974 			break;
   6975 		}
   6976 
   6977 		/*
   6978 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6979 		 * once we know we can transmit the packet, since we
   6980 		 * do some internal FIFO space accounting here.
   6981 		 */
   6982 		if (sc->sc_type == WM_T_82547 &&
   6983 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6984 			DPRINTF(WM_DEBUG_TX,
   6985 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6986 			    device_xname(sc->sc_dev)));
   6987 			if (!is_transmit)
   6988 				ifp->if_flags |= IFF_OACTIVE;
   6989 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6990 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6991 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6992 			break;
   6993 		}
   6994 
   6995 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6996 
   6997 		DPRINTF(WM_DEBUG_TX,
   6998 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6999 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7000 
   7001 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7002 
   7003 		/*
   7004 		 * Store a pointer to the packet so that we can free it
   7005 		 * later.
   7006 		 *
   7007 		 * Initially, we consider the number of descriptors the
7008 		 * packet uses to be the number of DMA segments.  This may be
   7009 		 * incremented by 1 if we do checksum offload (a descriptor
   7010 		 * is used to set the checksum context).
   7011 		 */
   7012 		txs->txs_mbuf = m0;
   7013 		txs->txs_firstdesc = txq->txq_next;
   7014 		txs->txs_ndesc = segs_needed;
   7015 
   7016 		/* Set up offload parameters for this packet. */
   7017 		if (m0->m_pkthdr.csum_flags &
   7018 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7019 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7020 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7021 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7022 					  &cksumfields) != 0) {
   7023 				/* Error message already displayed. */
   7024 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7025 				continue;
   7026 			}
   7027 		} else {
   7028 			cksumcmd = 0;
   7029 			cksumfields = 0;
   7030 		}
   7031 
   7032 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7033 
   7034 		/* Sync the DMA map. */
   7035 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7036 		    BUS_DMASYNC_PREWRITE);
   7037 
   7038 		/* Initialize the transmit descriptor. */
   7039 		for (nexttx = txq->txq_next, seg = 0;
   7040 		     seg < dmamap->dm_nsegs; seg++) {
   7041 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7042 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7043 			     seglen != 0;
   7044 			     curaddr += curlen, seglen -= curlen,
   7045 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7046 				curlen = seglen;
   7047 
   7048 				/*
   7049 				 * So says the Linux driver:
   7050 				 * Work around for premature descriptor
   7051 				 * write-backs in TSO mode.  Append a
   7052 				 * 4-byte sentinel descriptor.
   7053 				 */
   7054 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7055 				    curlen > 8)
   7056 					curlen -= 4;
   7057 
   7058 				wm_set_dma_addr(
   7059 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7060 				txq->txq_descs[nexttx].wtx_cmdlen
   7061 				    = htole32(cksumcmd | curlen);
   7062 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7063 				    = 0;
   7064 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7065 				    = cksumfields;
   7066 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7067 				lasttx = nexttx;
   7068 
   7069 				DPRINTF(WM_DEBUG_TX,
   7070 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7071 				     "len %#04zx\n",
   7072 				    device_xname(sc->sc_dev), nexttx,
   7073 				    (uint64_t)curaddr, curlen));
   7074 			}
   7075 		}
   7076 
   7077 		KASSERT(lasttx != -1);
   7078 
   7079 		/*
   7080 		 * Set up the command byte on the last descriptor of
   7081 		 * the packet.  If we're in the interrupt delay window,
   7082 		 * delay the interrupt.
   7083 		 */
   7084 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7085 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7086 
   7087 		/*
   7088 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7089 		 * up the descriptor to encapsulate the packet for us.
   7090 		 *
   7091 		 * This is only valid on the last descriptor of the packet.
   7092 		 */
   7093 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7094 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7095 			    htole32(WTX_CMD_VLE);
   7096 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7097 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7098 		}
   7099 
   7100 		txs->txs_lastdesc = lasttx;
   7101 
   7102 		DPRINTF(WM_DEBUG_TX,
   7103 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7104 		    device_xname(sc->sc_dev),
   7105 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7106 
   7107 		/* Sync the descriptors we're using. */
   7108 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7109 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7110 
   7111 		/* Give the packet to the chip. */
   7112 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7113 
   7114 		DPRINTF(WM_DEBUG_TX,
   7115 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7116 
   7117 		DPRINTF(WM_DEBUG_TX,
   7118 		    ("%s: TX: finished transmitting packet, job %d\n",
   7119 		    device_xname(sc->sc_dev), txq->txq_snext));
   7120 
   7121 		/* Advance the tx pointer. */
   7122 		txq->txq_free -= txs->txs_ndesc;
   7123 		txq->txq_next = nexttx;
   7124 
   7125 		txq->txq_sfree--;
   7126 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7127 
   7128 		/* Pass the packet to any BPF listeners. */
   7129 		bpf_mtap(ifp, m0);
   7130 	}
   7131 
   7132 	if (m0 != NULL) {
   7133 		if (!is_transmit)
   7134 			ifp->if_flags |= IFF_OACTIVE;
   7135 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7136 		WM_Q_EVCNT_INCR(txq, txdrop);
   7137 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7138 			__func__));
   7139 		m_freem(m0);
   7140 	}
   7141 
   7142 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7143 		/* No more slots; notify upper layer. */
   7144 		if (!is_transmit)
   7145 			ifp->if_flags |= IFF_OACTIVE;
   7146 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7147 	}
   7148 
   7149 	if (txq->txq_free != ofree) {
   7150 		/* Set a watchdog timer in case the chip flakes out. */
   7151 		ifp->if_timer = 5;
   7152 	}
   7153 }
   7154 
   7155 /*
   7156  * wm_nq_tx_offload:
   7157  *
   7158  *	Set up TCP/IP checksumming parameters for the
   7159  *	specified packet, for NEWQUEUE devices
   7160  */
   7161 static int
   7162 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7163     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7164 {
   7165 	struct mbuf *m0 = txs->txs_mbuf;
   7166 	struct m_tag *mtag;
   7167 	uint32_t vl_len, mssidx, cmdc;
   7168 	struct ether_header *eh;
   7169 	int offset, iphl;
   7170 
   7171 	/*
   7172 	 * XXX It would be nice if the mbuf pkthdr had offset
   7173 	 * fields for the protocol headers.
   7174 	 */
   7175 	*cmdlenp = 0;
   7176 	*fieldsp = 0;
   7177 
   7178 	eh = mtod(m0, struct ether_header *);
   7179 	switch (htons(eh->ether_type)) {
   7180 	case ETHERTYPE_IP:
   7181 	case ETHERTYPE_IPV6:
   7182 		offset = ETHER_HDR_LEN;
   7183 		break;
   7184 
   7185 	case ETHERTYPE_VLAN:
   7186 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7187 		break;
   7188 
   7189 	default:
   7190 		/* Don't support this protocol or encapsulation. */
   7191 		*do_csum = false;
   7192 		return 0;
   7193 	}
   7194 	*do_csum = true;
   7195 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7196 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7197 
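	/*
	 * Pack the MAC header length, the IP header length and, further
	 * below, the VLAN tag into the context descriptor's VLAN/length
	 * word; the KASSERTs verify that each value fits in its field.
	 */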
   7198 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7199 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7200 
   7201 	if ((m0->m_pkthdr.csum_flags &
   7202 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7203 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7204 	} else {
   7205 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7206 	}
   7207 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7208 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7209 
   7210 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7211 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7212 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7213 		*cmdlenp |= NQTX_CMD_VLE;
   7214 	}
   7215 
   7216 	mssidx = 0;
   7217 
   7218 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7219 		int hlen = offset + iphl;
   7220 		int tcp_hlen;
   7221 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7222 
   7223 		if (__predict_false(m0->m_len <
   7224 				    (hlen + sizeof(struct tcphdr)))) {
   7225 			/*
   7226 			 * TCP/IP headers are not in the first mbuf; we need
   7227 			 * to do this the slow and painful way.  Let's just
   7228 			 * hope this doesn't happen very often.
   7229 			 */
   7230 			struct tcphdr th;
   7231 
   7232 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7233 
   7234 			m_copydata(m0, hlen, sizeof(th), &th);
   7235 			if (v4) {
   7236 				struct ip ip;
   7237 
   7238 				m_copydata(m0, offset, sizeof(ip), &ip);
   7239 				ip.ip_len = 0;
   7240 				m_copyback(m0,
   7241 				    offset + offsetof(struct ip, ip_len),
   7242 				    sizeof(ip.ip_len), &ip.ip_len);
   7243 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7244 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7245 			} else {
   7246 				struct ip6_hdr ip6;
   7247 
   7248 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7249 				ip6.ip6_plen = 0;
   7250 				m_copyback(m0,
   7251 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7252 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7253 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7254 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7255 			}
   7256 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7257 			    sizeof(th.th_sum), &th.th_sum);
   7258 
   7259 			tcp_hlen = th.th_off << 2;
   7260 		} else {
   7261 			/*
   7262 			 * TCP/IP headers are in the first mbuf; we can do
   7263 			 * this the easy way.
   7264 			 */
   7265 			struct tcphdr *th;
   7266 
   7267 			if (v4) {
   7268 				struct ip *ip =
   7269 				    (void *)(mtod(m0, char *) + offset);
   7270 				th = (void *)(mtod(m0, char *) + hlen);
   7271 
   7272 				ip->ip_len = 0;
   7273 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7274 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7275 			} else {
   7276 				struct ip6_hdr *ip6 =
   7277 				    (void *)(mtod(m0, char *) + offset);
   7278 				th = (void *)(mtod(m0, char *) + hlen);
   7279 
   7280 				ip6->ip6_plen = 0;
   7281 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7282 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7283 			}
   7284 			tcp_hlen = th->th_off << 2;
   7285 		}
   7286 		hlen += tcp_hlen;
   7287 		*cmdlenp |= NQTX_CMD_TSE;
   7288 
   7289 		if (v4) {
   7290 			WM_Q_EVCNT_INCR(txq, txtso);
   7291 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7292 		} else {
   7293 			WM_Q_EVCNT_INCR(txq, txtso6);
   7294 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7295 		}
7296 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
7297 		KASSERT(((m0->m_pkthdr.len - hlen)
		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7298 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7299 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7300 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7301 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7302 	} else {
   7303 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7304 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7305 	}
   7306 
   7307 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7308 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7309 		cmdc |= NQTXC_CMD_IP4;
   7310 	}
   7311 
   7312 	if (m0->m_pkthdr.csum_flags &
   7313 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7314 		WM_Q_EVCNT_INCR(txq, txtusum);
   7315 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7316 			cmdc |= NQTXC_CMD_TCP;
   7317 		} else {
   7318 			cmdc |= NQTXC_CMD_UDP;
   7319 		}
   7320 		cmdc |= NQTXC_CMD_IP4;
   7321 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7322 	}
   7323 	if (m0->m_pkthdr.csum_flags &
   7324 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7325 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7326 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7327 			cmdc |= NQTXC_CMD_TCP;
   7328 		} else {
   7329 			cmdc |= NQTXC_CMD_UDP;
   7330 		}
   7331 		cmdc |= NQTXC_CMD_IP6;
   7332 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7333 	}
   7334 
   7335 	/*
7336 	 * We don't have to write a context descriptor for every packet on
7337 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
7338 	 * I354, I210 and I211.  It is enough to write it once per Tx queue
7339 	 * for these controllers.
7340 	 * Writing a context descriptor for every packet adds overhead,
7341 	 * but it does not cause problems.
   7342 	 */
   7343 	/* Fill in the context descriptor. */
   7344 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7345 	    htole32(vl_len);
   7346 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7347 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7348 	    htole32(cmdc);
   7349 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7350 	    htole32(mssidx);
   7351 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7352 	DPRINTF(WM_DEBUG_TX,
   7353 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7354 	    txq->txq_next, 0, vl_len));
   7355 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7356 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7357 	txs->txs_ndesc++;
   7358 	return 0;
   7359 }
   7360 
   7361 /*
   7362  * wm_nq_start:		[ifnet interface function]
   7363  *
   7364  *	Start packet transmission on the interface for NEWQUEUE devices
   7365  */
   7366 static void
   7367 wm_nq_start(struct ifnet *ifp)
   7368 {
   7369 	struct wm_softc *sc = ifp->if_softc;
   7370 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7371 
   7372 #ifdef WM_MPSAFE
   7373 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7374 #endif
   7375 	/*
   7376 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7377 	 */
   7378 
   7379 	mutex_enter(txq->txq_lock);
   7380 	if (!txq->txq_stopping)
   7381 		wm_nq_start_locked(ifp);
   7382 	mutex_exit(txq->txq_lock);
   7383 }
   7384 
   7385 static void
   7386 wm_nq_start_locked(struct ifnet *ifp)
   7387 {
   7388 	struct wm_softc *sc = ifp->if_softc;
   7389 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7390 
   7391 	wm_nq_send_common_locked(ifp, txq, false);
   7392 }
   7393 
   7394 static int
   7395 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7396 {
   7397 	int qid;
   7398 	struct wm_softc *sc = ifp->if_softc;
   7399 	struct wm_txqueue *txq;
   7400 
   7401 	qid = wm_select_txqueue(ifp, m);
   7402 	txq = &sc->sc_queue[qid].wmq_txq;
   7403 
   7404 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7405 		m_freem(m);
   7406 		WM_Q_EVCNT_INCR(txq, txdrop);
   7407 		return ENOBUFS;
   7408 	}
   7409 
   7410 	/*
7411 	 * XXX NOMPSAFE: ifp->if_data should be percpu.
   7412 	 */
   7413 	ifp->if_obytes += m->m_pkthdr.len;
   7414 	if (m->m_flags & M_MCAST)
   7415 		ifp->if_omcasts++;
   7416 
   7417 	/*
7418 	 * There are two situations in which this mutex_tryenter() can
7419 	 * fail at run time:
7420 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
7421 	 *     (2) contention with the deferred if_start softint
7422 	 *         (wm_handle_queue())
7423 	 * In either case, the last packet enqueued to txq->txq_interq is
7424 	 * dequeued by wm_deferred_start_locked(), so the packet does not
7425 	 * get stuck.
   7426 	 */
   7427 	if (mutex_tryenter(txq->txq_lock)) {
   7428 		if (!txq->txq_stopping)
   7429 			wm_nq_transmit_locked(ifp, txq);
   7430 		mutex_exit(txq->txq_lock);
   7431 	}
   7432 
   7433 	return 0;
   7434 }
   7435 
   7436 static void
   7437 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7438 {
   7439 
   7440 	wm_nq_send_common_locked(ifp, txq, true);
   7441 }
   7442 
   7443 static void
   7444 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7445     bool is_transmit)
   7446 {
   7447 	struct wm_softc *sc = ifp->if_softc;
   7448 	struct mbuf *m0;
   7449 	struct m_tag *mtag;
   7450 	struct wm_txsoft *txs;
   7451 	bus_dmamap_t dmamap;
   7452 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7453 	bool do_csum, sent;
   7454 
   7455 	KASSERT(mutex_owned(txq->txq_lock));
   7456 
   7457 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7458 		return;
   7459 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7460 		return;
   7461 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7462 		return;
   7463 
   7464 	sent = false;
   7465 
   7466 	/*
   7467 	 * Loop through the send queue, setting up transmit descriptors
   7468 	 * until we drain the queue, or use up all available transmit
   7469 	 * descriptors.
   7470 	 */
   7471 	for (;;) {
   7472 		m0 = NULL;
   7473 
   7474 		/* Get a work queue entry. */
   7475 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7476 			wm_txeof(sc, txq);
   7477 			if (txq->txq_sfree == 0) {
   7478 				DPRINTF(WM_DEBUG_TX,
   7479 				    ("%s: TX: no free job descriptors\n",
   7480 					device_xname(sc->sc_dev)));
   7481 				WM_Q_EVCNT_INCR(txq, txsstall);
   7482 				break;
   7483 			}
   7484 		}
   7485 
   7486 		/* Grab a packet off the queue. */
   7487 		if (is_transmit)
   7488 			m0 = pcq_get(txq->txq_interq);
   7489 		else
   7490 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7491 		if (m0 == NULL)
   7492 			break;
   7493 
   7494 		DPRINTF(WM_DEBUG_TX,
   7495 		    ("%s: TX: have packet to transmit: %p\n",
   7496 		    device_xname(sc->sc_dev), m0));
   7497 
   7498 		txs = &txq->txq_soft[txq->txq_snext];
   7499 		dmamap = txs->txs_dmamap;
   7500 
   7501 		/*
   7502 		 * Load the DMA map.  If this fails, the packet either
   7503 		 * didn't fit in the allotted number of segments, or we
   7504 		 * were short on resources.  For the too-many-segments
   7505 		 * case, we simply report an error and drop the packet,
   7506 		 * since we can't sanely copy a jumbo packet to a single
   7507 		 * buffer.
   7508 		 */
   7509 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7510 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7511 		if (error) {
   7512 			if (error == EFBIG) {
   7513 				WM_Q_EVCNT_INCR(txq, txdrop);
   7514 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7515 				    "DMA segments, dropping...\n",
   7516 				    device_xname(sc->sc_dev));
   7517 				wm_dump_mbuf_chain(sc, m0);
   7518 				m_freem(m0);
   7519 				continue;
   7520 			}
   7521 			/* Short on resources, just stop for now. */
   7522 			DPRINTF(WM_DEBUG_TX,
   7523 			    ("%s: TX: dmamap load failed: %d\n",
   7524 			    device_xname(sc->sc_dev), error));
   7525 			break;
   7526 		}
   7527 
   7528 		segs_needed = dmamap->dm_nsegs;
   7529 
   7530 		/*
   7531 		 * Ensure we have enough descriptors free to describe
   7532 		 * the packet.  Note, we always reserve one descriptor
   7533 		 * at the end of the ring due to the semantics of the
   7534 		 * TDT register, plus one more in the event we need
   7535 		 * to load offload context.
   7536 		 */
   7537 		if (segs_needed > txq->txq_free - 2) {
   7538 			/*
   7539 			 * Not enough free descriptors to transmit this
   7540 			 * packet.  We haven't committed anything yet,
   7541 			 * so just unload the DMA map, put the packet
7542 			 * back on the queue, and punt.  Notify the upper
   7543 			 * layer that there are no more slots left.
   7544 			 */
   7545 			DPRINTF(WM_DEBUG_TX,
   7546 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7547 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7548 			    segs_needed, txq->txq_free - 1));
   7549 			if (!is_transmit)
   7550 				ifp->if_flags |= IFF_OACTIVE;
   7551 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7552 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7553 			WM_Q_EVCNT_INCR(txq, txdstall);
   7554 			break;
   7555 		}
   7556 
   7557 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7558 
   7559 		DPRINTF(WM_DEBUG_TX,
   7560 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7561 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7562 
   7563 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7564 
   7565 		/*
   7566 		 * Store a pointer to the packet so that we can free it
   7567 		 * later.
   7568 		 *
   7569 		 * Initially, we consider the number of descriptors the
7570 		 * packet uses to be the number of DMA segments.  This may be
   7571 		 * incremented by 1 if we do checksum offload (a descriptor
   7572 		 * is used to set the checksum context).
   7573 		 */
   7574 		txs->txs_mbuf = m0;
   7575 		txs->txs_firstdesc = txq->txq_next;
   7576 		txs->txs_ndesc = segs_needed;
   7577 
   7578 		/* Set up offload parameters for this packet. */
   7579 		uint32_t cmdlen, fields, dcmdlen;
   7580 		if (m0->m_pkthdr.csum_flags &
   7581 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7582 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7583 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7584 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7585 			    &do_csum) != 0) {
   7586 				/* Error message already displayed. */
   7587 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7588 				continue;
   7589 			}
   7590 		} else {
   7591 			do_csum = false;
   7592 			cmdlen = 0;
   7593 			fields = 0;
   7594 		}
   7595 
   7596 		/* Sync the DMA map. */
   7597 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7598 		    BUS_DMASYNC_PREWRITE);
   7599 
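		/*
		 * When no offload is requested, a plain legacy descriptor
		 * is enough; otherwise use advanced data descriptors,
		 * which pair with the context descriptor written by
		 * wm_nq_tx_offload() above.
		 */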
   7600 		/* Initialize the first transmit descriptor. */
   7601 		nexttx = txq->txq_next;
   7602 		if (!do_csum) {
   7603 			/* setup a legacy descriptor */
   7604 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7605 			    dmamap->dm_segs[0].ds_addr);
   7606 			txq->txq_descs[nexttx].wtx_cmdlen =
   7607 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7608 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7609 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7610 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7611 			    NULL) {
   7612 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7613 				    htole32(WTX_CMD_VLE);
   7614 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7615 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7616 			} else {
   7617 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7618 			}
   7619 			dcmdlen = 0;
   7620 		} else {
   7621 			/* setup an advanced data descriptor */
   7622 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7623 			    htole64(dmamap->dm_segs[0].ds_addr);
   7624 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7625 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7626 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7627 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7628 			    htole32(fields);
   7629 			DPRINTF(WM_DEBUG_TX,
   7630 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7631 			    device_xname(sc->sc_dev), nexttx,
   7632 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7633 			DPRINTF(WM_DEBUG_TX,
   7634 			    ("\t 0x%08x%08x\n", fields,
   7635 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7636 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7637 		}
   7638 
   7639 		lasttx = nexttx;
   7640 		nexttx = WM_NEXTTX(txq, nexttx);
   7641 		/*
7642 		 * Fill in the next descriptors.  The legacy and advanced
7643 		 * formats are the same here.
   7644 		 */
   7645 		for (seg = 1; seg < dmamap->dm_nsegs;
   7646 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7647 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7648 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7649 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7650 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7651 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7652 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7653 			lasttx = nexttx;
   7654 
   7655 			DPRINTF(WM_DEBUG_TX,
   7656 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7657 			     "len %#04zx\n",
   7658 			    device_xname(sc->sc_dev), nexttx,
   7659 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7660 			    dmamap->dm_segs[seg].ds_len));
   7661 		}
   7662 
   7663 		KASSERT(lasttx != -1);
   7664 
   7665 		/*
   7666 		 * Set up the command byte on the last descriptor of
   7667 		 * the packet.  If we're in the interrupt delay window,
   7668 		 * delay the interrupt.
   7669 		 */
   7670 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7671 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7672 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7673 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7674 
   7675 		txs->txs_lastdesc = lasttx;
   7676 
   7677 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7678 		    device_xname(sc->sc_dev),
   7679 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7680 
   7681 		/* Sync the descriptors we're using. */
   7682 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7683 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7684 
   7685 		/* Give the packet to the chip. */
   7686 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7687 		sent = true;
   7688 
   7689 		DPRINTF(WM_DEBUG_TX,
   7690 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7691 
   7692 		DPRINTF(WM_DEBUG_TX,
   7693 		    ("%s: TX: finished transmitting packet, job %d\n",
   7694 		    device_xname(sc->sc_dev), txq->txq_snext));
   7695 
   7696 		/* Advance the tx pointer. */
   7697 		txq->txq_free -= txs->txs_ndesc;
   7698 		txq->txq_next = nexttx;
   7699 
   7700 		txq->txq_sfree--;
   7701 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7702 
   7703 		/* Pass the packet to any BPF listeners. */
   7704 		bpf_mtap(ifp, m0);
   7705 	}
   7706 
   7707 	if (m0 != NULL) {
   7708 		if (!is_transmit)
   7709 			ifp->if_flags |= IFF_OACTIVE;
   7710 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7711 		WM_Q_EVCNT_INCR(txq, txdrop);
   7712 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7713 			__func__));
   7714 		m_freem(m0);
   7715 	}
   7716 
   7717 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7718 		/* No more slots; notify upper layer. */
   7719 		if (!is_transmit)
   7720 			ifp->if_flags |= IFF_OACTIVE;
   7721 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7722 	}
   7723 
   7724 	if (sent) {
   7725 		/* Set a watchdog timer in case the chip flakes out. */
   7726 		ifp->if_timer = 5;
   7727 	}
   7728 }
   7729 
   7730 static void
   7731 wm_deferred_start_locked(struct wm_txqueue *txq)
   7732 {
   7733 	struct wm_softc *sc = txq->txq_sc;
   7734 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7735 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7736 	int qid = wmq->wmq_id;
   7737 
   7738 	KASSERT(mutex_owned(txq->txq_lock));
   7739 
7740 	/* Don't release txq_lock here; the caller owns and releases it. */
7741 	if (txq->txq_stopping)
7742 		return;
   7744 
   7745 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7746 		/* XXX needed for ALTQ and single-CPU systems */
   7747 		if (qid == 0)
   7748 			wm_nq_start_locked(ifp);
   7749 		wm_nq_transmit_locked(ifp, txq);
   7750 	} else {
7751 		/* XXX needed for ALTQ and single-CPU systems */
   7752 		if (qid == 0)
   7753 			wm_start_locked(ifp);
   7754 		wm_transmit_locked(ifp, txq);
   7755 	}
   7756 }
   7757 
   7758 /* Interrupt */
   7759 
   7760 /*
   7761  * wm_txeof:
   7762  *
   7763  *	Helper; handle transmit interrupts.
   7764  */
   7765 static int
   7766 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7767 {
   7768 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7769 	struct wm_txsoft *txs;
   7770 	bool processed = false;
   7771 	int count = 0;
   7772 	int i;
   7773 	uint8_t status;
   7774 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7775 
   7776 	KASSERT(mutex_owned(txq->txq_lock));
   7777 
   7778 	if (txq->txq_stopping)
   7779 		return 0;
   7780 
   7781 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7782 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7783 	if (wmq->wmq_id == 0)
   7784 		ifp->if_flags &= ~IFF_OACTIVE;
   7785 
   7786 	/*
   7787 	 * Go through the Tx list and free mbufs for those
   7788 	 * frames which have been transmitted.
   7789 	 */
   7790 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7791 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7792 		txs = &txq->txq_soft[i];
   7793 
   7794 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7795 			device_xname(sc->sc_dev), i));
   7796 
   7797 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7798 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7799 
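		/*
		 * If the descriptor done (DD) bit isn't set yet, the chip
		 * hasn't finished this job; re-sync the last descriptor
		 * for the next poll and stop scanning.
		 */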
   7800 		status =
   7801 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7802 		if ((status & WTX_ST_DD) == 0) {
   7803 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7804 			    BUS_DMASYNC_PREREAD);
   7805 			break;
   7806 		}
   7807 
   7808 		processed = true;
   7809 		count++;
   7810 		DPRINTF(WM_DEBUG_TX,
   7811 		    ("%s: TX: job %d done: descs %d..%d\n",
   7812 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7813 		    txs->txs_lastdesc));
   7814 
   7815 		/*
   7816 		 * XXX We should probably be using the statistics
   7817 		 * XXX registers, but I don't know if they exist
   7818 		 * XXX on chips before the i82544.
   7819 		 */
   7820 
   7821 #ifdef WM_EVENT_COUNTERS
   7822 		if (status & WTX_ST_TU)
   7823 			WM_Q_EVCNT_INCR(txq, tu);
   7824 #endif /* WM_EVENT_COUNTERS */
   7825 
   7826 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7827 			ifp->if_oerrors++;
   7828 			if (status & WTX_ST_LC)
   7829 				log(LOG_WARNING, "%s: late collision\n",
   7830 				    device_xname(sc->sc_dev));
   7831 			else if (status & WTX_ST_EC) {
   7832 				ifp->if_collisions += 16;
   7833 				log(LOG_WARNING, "%s: excessive collisions\n",
   7834 				    device_xname(sc->sc_dev));
   7835 			}
   7836 		} else
   7837 			ifp->if_opackets++;
   7838 
   7839 		txq->txq_packets++;
   7840 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7841 
   7842 		txq->txq_free += txs->txs_ndesc;
   7843 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7844 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7845 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7846 		m_freem(txs->txs_mbuf);
   7847 		txs->txs_mbuf = NULL;
   7848 	}
   7849 
   7850 	/* Update the dirty transmit buffer pointer. */
   7851 	txq->txq_sdirty = i;
   7852 	DPRINTF(WM_DEBUG_TX,
   7853 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7854 
   7855 	if (count != 0)
   7856 		rnd_add_uint32(&sc->rnd_source, count);
   7857 
   7858 	/*
   7859 	 * If there are no more pending transmissions, cancel the watchdog
   7860 	 * timer.
   7861 	 */
   7862 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7863 		ifp->if_timer = 0;
   7864 
   7865 	return processed;
   7866 }
   7867 
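/*
 * The wm_rxdesc_* accessors below hide the three RX descriptor formats:
 * the legacy format, the 82574's extended format and the advanced
 * (NEWQUEUE, i.e. 82575 and newer) format.
 */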
   7868 static inline uint32_t
   7869 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7870 {
   7871 	struct wm_softc *sc = rxq->rxq_sc;
   7872 
   7873 	if (sc->sc_type == WM_T_82574)
   7874 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7875 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7876 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7877 	else
   7878 		return rxq->rxq_descs[idx].wrx_status;
   7879 }
   7880 
   7881 static inline uint32_t
   7882 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7883 {
   7884 	struct wm_softc *sc = rxq->rxq_sc;
   7885 
   7886 	if (sc->sc_type == WM_T_82574)
   7887 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7888 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7889 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7890 	else
   7891 		return rxq->rxq_descs[idx].wrx_errors;
   7892 }
   7893 
   7894 static inline uint16_t
   7895 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7896 {
   7897 	struct wm_softc *sc = rxq->rxq_sc;
   7898 
   7899 	if (sc->sc_type == WM_T_82574)
   7900 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7901 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7902 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7903 	else
   7904 		return rxq->rxq_descs[idx].wrx_special;
   7905 }
   7906 
   7907 static inline int
   7908 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7909 {
   7910 	struct wm_softc *sc = rxq->rxq_sc;
   7911 
   7912 	if (sc->sc_type == WM_T_82574)
   7913 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7914 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7915 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7916 	else
   7917 		return rxq->rxq_descs[idx].wrx_len;
   7918 }
   7919 
   7920 #ifdef WM_DEBUG
   7921 static inline uint32_t
   7922 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7923 {
   7924 	struct wm_softc *sc = rxq->rxq_sc;
   7925 
   7926 	if (sc->sc_type == WM_T_82574)
   7927 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7928 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7929 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7930 	else
   7931 		return 0;
   7932 }
   7933 
   7934 static inline uint8_t
   7935 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7936 {
   7937 	struct wm_softc *sc = rxq->rxq_sc;
   7938 
   7939 	if (sc->sc_type == WM_T_82574)
   7940 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7941 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7942 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7943 	else
   7944 		return 0;
   7945 }
   7946 #endif /* WM_DEBUG */
   7947 
   7948 static inline bool
   7949 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7950     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7951 {
   7952 
   7953 	if (sc->sc_type == WM_T_82574)
   7954 		return (status & ext_bit) != 0;
   7955 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7956 		return (status & nq_bit) != 0;
   7957 	else
   7958 		return (status & legacy_bit) != 0;
   7959 }
   7960 
   7961 static inline bool
   7962 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7963     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7964 {
   7965 
   7966 	if (sc->sc_type == WM_T_82574)
   7967 		return (error & ext_bit) != 0;
   7968 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7969 		return (error & nq_bit) != 0;
   7970 	else
   7971 		return (error & legacy_bit) != 0;
   7972 }
   7973 
   7974 static inline bool
   7975 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7976 {
   7977 
   7978 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7979 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7980 		return true;
   7981 	else
   7982 		return false;
   7983 }
   7984 
   7985 static inline bool
   7986 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7987 {
   7988 	struct wm_softc *sc = rxq->rxq_sc;
   7989 
   7990 	/* XXXX missing error bit for newqueue? */
   7991 	if (wm_rxdesc_is_set_error(sc, errors,
   7992 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7993 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7994 		NQRXC_ERROR_RXE)) {
   7995 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7996 			log(LOG_WARNING, "%s: symbol error\n",
   7997 			    device_xname(sc->sc_dev));
   7998 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7999 			log(LOG_WARNING, "%s: receive sequence error\n",
   8000 			    device_xname(sc->sc_dev));
   8001 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8002 			log(LOG_WARNING, "%s: CRC error\n",
   8003 			    device_xname(sc->sc_dev));
   8004 		return true;
   8005 	}
   8006 
   8007 	return false;
   8008 }
   8009 
   8010 static inline bool
   8011 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8012 {
   8013 	struct wm_softc *sc = rxq->rxq_sc;
   8014 
   8015 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8016 		NQRXC_STATUS_DD)) {
   8017 		/* We have processed all of the receive descriptors. */
   8018 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8019 		return false;
   8020 	}
   8021 
   8022 	return true;
   8023 }
   8024 
   8025 static inline bool
   8026 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8027     struct mbuf *m)
   8028 {
   8029 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8030 
   8031 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8032 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8033 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8034 	}
   8035 
   8036 	return true;
   8037 }
   8038 
   8039 static inline void
   8040 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8041     uint32_t errors, struct mbuf *m)
   8042 {
   8043 	struct wm_softc *sc = rxq->rxq_sc;
   8044 
   8045 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8046 		if (wm_rxdesc_is_set_status(sc, status,
   8047 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8048 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8049 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8050 			if (wm_rxdesc_is_set_error(sc, errors,
   8051 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8052 				m->m_pkthdr.csum_flags |=
   8053 					M_CSUM_IPv4_BAD;
   8054 		}
   8055 		if (wm_rxdesc_is_set_status(sc, status,
   8056 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8057 			/*
   8058 			 * Note: we don't know if this was TCP or UDP,
   8059 			 * so we just set both bits, and expect the
   8060 			 * upper layers to deal.
   8061 			 */
   8062 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8063 			m->m_pkthdr.csum_flags |=
   8064 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8065 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8066 			if (wm_rxdesc_is_set_error(sc, errors,
   8067 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8068 				m->m_pkthdr.csum_flags |=
   8069 					M_CSUM_TCP_UDP_BAD;
   8070 		}
   8071 	}
   8072 }
   8073 
   8074 /*
   8075  * wm_rxeof:
   8076  *
   8077  *	Helper; handle receive interrupts.
   8078  */
   8079 static void
   8080 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8081 {
   8082 	struct wm_softc *sc = rxq->rxq_sc;
   8083 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8084 	struct wm_rxsoft *rxs;
   8085 	struct mbuf *m;
   8086 	int i, len;
   8087 	int count = 0;
   8088 	uint32_t status, errors;
   8089 	uint16_t vlantag;
   8090 
   8091 	KASSERT(mutex_owned(rxq->rxq_lock));
   8092 
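         	/*
         	 * The caller bounds the work done per call via "limit";
         	 * wm_intr_legacy() passes UINT_MAX, which effectively
         	 * disables the bound.
         	 */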
   8093 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8094 		if (limit-- == 0) {
   8095 			rxq->rxq_ptr = i;
   8096 			break;
   8097 		}
   8098 
   8099 		rxs = &rxq->rxq_soft[i];
   8100 
   8101 		DPRINTF(WM_DEBUG_RX,
   8102 		    ("%s: RX: checking descriptor %d\n",
   8103 		    device_xname(sc->sc_dev), i));
    8104 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8105 
   8106 		status = wm_rxdesc_get_status(rxq, i);
   8107 		errors = wm_rxdesc_get_errors(rxq, i);
   8108 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8109 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8110 #ifdef WM_DEBUG
   8111 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8112 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8113 #endif
   8114 
   8115 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8116 			/*
    8117 			 * Update the receive pointer while holding rxq_lock
    8118 			 * so it stays consistent with the counters.
   8119 			 */
   8120 			rxq->rxq_ptr = i;
   8121 			break;
   8122 		}
   8123 
   8124 		count++;
   8125 		if (__predict_false(rxq->rxq_discard)) {
   8126 			DPRINTF(WM_DEBUG_RX,
   8127 			    ("%s: RX: discarding contents of descriptor %d\n",
   8128 			    device_xname(sc->sc_dev), i));
   8129 			wm_init_rxdesc(rxq, i);
   8130 			if (wm_rxdesc_is_eop(rxq, status)) {
   8131 				/* Reset our state. */
   8132 				DPRINTF(WM_DEBUG_RX,
   8133 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8134 				    device_xname(sc->sc_dev)));
   8135 				rxq->rxq_discard = 0;
   8136 			}
   8137 			continue;
   8138 		}
   8139 
   8140 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8141 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8142 
   8143 		m = rxs->rxs_mbuf;
   8144 
   8145 		/*
   8146 		 * Add a new receive buffer to the ring, unless of
   8147 		 * course the length is zero. Treat the latter as a
   8148 		 * failed mapping.
   8149 		 */
   8150 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8151 			/*
   8152 			 * Failed, throw away what we've done so
   8153 			 * far, and discard the rest of the packet.
   8154 			 */
   8155 			ifp->if_ierrors++;
   8156 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8157 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8158 			wm_init_rxdesc(rxq, i);
   8159 			if (!wm_rxdesc_is_eop(rxq, status))
   8160 				rxq->rxq_discard = 1;
   8161 			if (rxq->rxq_head != NULL)
   8162 				m_freem(rxq->rxq_head);
   8163 			WM_RXCHAIN_RESET(rxq);
   8164 			DPRINTF(WM_DEBUG_RX,
   8165 			    ("%s: RX: Rx buffer allocation failed, "
   8166 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8167 			    rxq->rxq_discard ? " (discard)" : ""));
   8168 			continue;
   8169 		}
   8170 
   8171 		m->m_len = len;
   8172 		rxq->rxq_len += len;
   8173 		DPRINTF(WM_DEBUG_RX,
   8174 		    ("%s: RX: buffer at %p len %d\n",
   8175 		    device_xname(sc->sc_dev), m->m_data, len));
   8176 
   8177 		/* If this is not the end of the packet, keep looking. */
   8178 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8179 			WM_RXCHAIN_LINK(rxq, m);
   8180 			DPRINTF(WM_DEBUG_RX,
   8181 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8182 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8183 			continue;
   8184 		}
   8185 
   8186 		/*
    8187 		 * Okay, we have the entire packet now.  The chip is
    8188 		 * configured to include the FCS except on I350, I354
    8189 		 * and I21[01] (not all chips can be configured to
    8190 		 * strip it), so we need to trim it.
    8191 		 * We may need to adjust the length of the previous mbuf
    8192 		 * in the chain if the current mbuf is too short.
    8193 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8194 		 * register is always set on I350, so we don't trim there.
   8195 		 */
   8196 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8197 		    && (sc->sc_type != WM_T_I210)
   8198 		    && (sc->sc_type != WM_T_I211)) {
   8199 			if (m->m_len < ETHER_CRC_LEN) {
   8200 				rxq->rxq_tail->m_len
   8201 				    -= (ETHER_CRC_LEN - m->m_len);
   8202 				m->m_len = 0;
   8203 			} else
   8204 				m->m_len -= ETHER_CRC_LEN;
   8205 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8206 		} else
   8207 			len = rxq->rxq_len;
   8208 
   8209 		WM_RXCHAIN_LINK(rxq, m);
   8210 
   8211 		*rxq->rxq_tailp = NULL;
   8212 		m = rxq->rxq_head;
   8213 
   8214 		WM_RXCHAIN_RESET(rxq);
   8215 
   8216 		DPRINTF(WM_DEBUG_RX,
   8217 		    ("%s: RX: have entire packet, len -> %d\n",
   8218 		    device_xname(sc->sc_dev), len));
   8219 
   8220 		/* If an error occurred, update stats and drop the packet. */
   8221 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8222 			m_freem(m);
   8223 			continue;
   8224 		}
   8225 
   8226 		/* No errors.  Receive the packet. */
   8227 		m_set_rcvif(m, ifp);
   8228 		m->m_pkthdr.len = len;
   8229 		/*
    8230 		 * TODO:
    8231 		 * We should save the rsshash and rsstype in this mbuf.
   8232 		 */
   8233 		DPRINTF(WM_DEBUG_RX,
   8234 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8235 			device_xname(sc->sc_dev), rsstype, rsshash));
   8236 
   8237 		/*
   8238 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8239 		 * for us.  Associate the tag with the packet.
   8240 		 */
   8241 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8242 			continue;
   8243 
   8244 		/* Set up checksum info for this packet. */
   8245 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8246 		/*
    8247 		 * Update the receive pointer while holding rxq_lock so it
    8248 		 * stays consistent with the counters.
   8249 		 */
   8250 		rxq->rxq_ptr = i;
   8251 		rxq->rxq_packets++;
   8252 		rxq->rxq_bytes += len;
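         		/*
         		 * Drop rxq_lock across the enqueue so the stack can
         		 * run without holding our Rx lock; rxq_ptr was updated
         		 * above, so the ring state stays consistent.
         		 */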
   8253 		mutex_exit(rxq->rxq_lock);
   8254 
   8255 		/* Pass it on. */
   8256 		if_percpuq_enqueue(sc->sc_ipq, m);
   8257 
   8258 		mutex_enter(rxq->rxq_lock);
   8259 
   8260 		if (rxq->rxq_stopping)
   8261 			break;
   8262 	}
   8263 
   8264 	if (count != 0)
   8265 		rnd_add_uint32(&sc->rnd_source, count);
   8266 
   8267 	DPRINTF(WM_DEBUG_RX,
   8268 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8269 }
   8270 
   8271 /*
   8272  * wm_linkintr_gmii:
   8273  *
   8274  *	Helper; handle link interrupts for GMII.
   8275  */
   8276 static void
   8277 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8278 {
   8279 
   8280 	KASSERT(WM_CORE_LOCKED(sc));
   8281 
   8282 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8283 		__func__));
   8284 
   8285 	if (icr & ICR_LSC) {
   8286 		uint32_t reg;
   8287 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8288 
   8289 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8290 			wm_gig_downshift_workaround_ich8lan(sc);
   8291 
   8292 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8293 			device_xname(sc->sc_dev)));
   8294 		mii_pollstat(&sc->sc_mii);
   8295 		if (sc->sc_type == WM_T_82543) {
   8296 			int miistatus, active;
   8297 
   8298 			/*
   8299 			 * With 82543, we need to force speed and
   8300 			 * duplex on the MAC equal to what the PHY
   8301 			 * speed and duplex configuration is.
   8302 			 */
   8303 			miistatus = sc->sc_mii.mii_media_status;
   8304 
   8305 			if (miistatus & IFM_ACTIVE) {
   8306 				active = sc->sc_mii.mii_media_active;
   8307 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8308 				switch (IFM_SUBTYPE(active)) {
   8309 				case IFM_10_T:
   8310 					sc->sc_ctrl |= CTRL_SPEED_10;
   8311 					break;
   8312 				case IFM_100_TX:
   8313 					sc->sc_ctrl |= CTRL_SPEED_100;
   8314 					break;
   8315 				case IFM_1000_T:
   8316 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8317 					break;
   8318 				default:
   8319 					/*
    8320 					 * Fiber?
    8321 					 * Should not enter here.
   8322 					 */
   8323 					printf("unknown media (%x)\n", active);
   8324 					break;
   8325 				}
   8326 				if (active & IFM_FDX)
   8327 					sc->sc_ctrl |= CTRL_FD;
   8328 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8329 			}
   8330 		} else if ((sc->sc_type == WM_T_ICH8)
   8331 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8332 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8333 		} else if (sc->sc_type == WM_T_PCH) {
   8334 			wm_k1_gig_workaround_hv(sc,
   8335 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8336 		}
   8337 
   8338 		if ((sc->sc_phytype == WMPHY_82578)
   8339 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8340 			== IFM_1000_T)) {
   8341 
   8342 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8343 				delay(200*1000); /* XXX too big */
   8344 
   8345 				/* Link stall fix for link up */
   8346 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8347 				    HV_MUX_DATA_CTRL,
   8348 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8349 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8350 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8351 				    HV_MUX_DATA_CTRL,
   8352 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8353 			}
   8354 		}
   8355 		/*
   8356 		 * I217 Packet Loss issue:
   8357 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8358 		 * on power up.
   8359 		 * Set the Beacon Duration for I217 to 8 usec
   8360 		 */
   8361 		if ((sc->sc_type == WM_T_PCH_LPT)
   8362 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8363 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8364 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8365 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8366 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8367 		}
   8368 
   8369 		/* XXX Work-around I218 hang issue */
   8370 		/* e1000_k1_workaround_lpt_lp() */
   8371 
   8372 		if ((sc->sc_type == WM_T_PCH_LPT)
   8373 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8374 			/*
   8375 			 * Set platform power management values for Latency
   8376 			 * Tolerance Reporting (LTR)
   8377 			 */
   8378 			wm_platform_pm_pch_lpt(sc,
   8379 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8380 				    != 0));
   8381 		}
   8382 
   8383 		/* FEXTNVM6 K1-off workaround */
   8384 		if (sc->sc_type == WM_T_PCH_SPT) {
   8385 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8386 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8387 			    & FEXTNVM6_K1_OFF_ENABLE)
   8388 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8389 			else
   8390 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8391 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8392 		}
   8393 	} else if (icr & ICR_RXSEQ) {
   8394 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8395 			device_xname(sc->sc_dev)));
   8396 	}
   8397 }
   8398 
   8399 /*
   8400  * wm_linkintr_tbi:
   8401  *
   8402  *	Helper; handle link interrupts for TBI mode.
   8403  */
   8404 static void
   8405 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8406 {
   8407 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8408 	uint32_t status;
   8409 
   8410 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8411 		__func__));
   8412 
   8413 	status = CSR_READ(sc, WMREG_STATUS);
   8414 	if (icr & ICR_LSC) {
   8415 		if (status & STATUS_LU) {
   8416 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8417 			    device_xname(sc->sc_dev),
   8418 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8419 			/*
   8420 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8421 			 * so we should update sc->sc_ctrl
   8422 			 */
   8423 
   8424 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8425 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8426 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8427 			if (status & STATUS_FD)
   8428 				sc->sc_tctl |=
   8429 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8430 			else
   8431 				sc->sc_tctl |=
   8432 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8433 			if (sc->sc_ctrl & CTRL_TFCE)
   8434 				sc->sc_fcrtl |= FCRTL_XONE;
   8435 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8436 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8437 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8438 				      sc->sc_fcrtl);
   8439 			sc->sc_tbi_linkup = 1;
   8440 			if_link_state_change(ifp, LINK_STATE_UP);
   8441 		} else {
   8442 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8443 			    device_xname(sc->sc_dev)));
   8444 			sc->sc_tbi_linkup = 0;
   8445 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8446 		}
   8447 		/* Update LED */
   8448 		wm_tbi_serdes_set_linkled(sc);
   8449 	} else if (icr & ICR_RXSEQ) {
   8450 		DPRINTF(WM_DEBUG_LINK,
   8451 		    ("%s: LINK: Receive sequence error\n",
   8452 		    device_xname(sc->sc_dev)));
   8453 	}
   8454 }
   8455 
   8456 /*
   8457  * wm_linkintr_serdes:
   8458  *
    8459  *	Helper; handle link interrupts for SERDES mode.
   8460  */
   8461 static void
   8462 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8463 {
   8464 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8465 	struct mii_data *mii = &sc->sc_mii;
   8466 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8467 	uint32_t pcs_adv, pcs_lpab, reg;
   8468 
   8469 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8470 		__func__));
   8471 
   8472 	if (icr & ICR_LSC) {
   8473 		/* Check PCS */
   8474 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8475 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8476 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8477 				device_xname(sc->sc_dev)));
   8478 			mii->mii_media_status |= IFM_ACTIVE;
   8479 			sc->sc_tbi_linkup = 1;
   8480 			if_link_state_change(ifp, LINK_STATE_UP);
   8481 		} else {
   8482 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8483 				device_xname(sc->sc_dev)));
   8484 			mii->mii_media_status |= IFM_NONE;
   8485 			sc->sc_tbi_linkup = 0;
   8486 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8487 			wm_tbi_serdes_set_linkled(sc);
   8488 			return;
   8489 		}
   8490 		mii->mii_media_active |= IFM_1000_SX;
   8491 		if ((reg & PCS_LSTS_FDX) != 0)
   8492 			mii->mii_media_active |= IFM_FDX;
   8493 		else
   8494 			mii->mii_media_active |= IFM_HDX;
   8495 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8496 			/* Check flow */
   8497 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8498 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8499 				DPRINTF(WM_DEBUG_LINK,
   8500 				    ("XXX LINKOK but not ACOMP\n"));
   8501 				return;
   8502 			}
   8503 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8504 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8505 			DPRINTF(WM_DEBUG_LINK,
   8506 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8507 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8508 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8509 				mii->mii_media_active |= IFM_FLOW
   8510 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8511 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8512 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8513 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8514 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8515 				mii->mii_media_active |= IFM_FLOW
   8516 				    | IFM_ETH_TXPAUSE;
   8517 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8518 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8519 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8520 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8521 				mii->mii_media_active |= IFM_FLOW
   8522 				    | IFM_ETH_RXPAUSE;
   8523 		}
   8524 		/* Update LED */
   8525 		wm_tbi_serdes_set_linkled(sc);
   8526 	} else {
   8527 		DPRINTF(WM_DEBUG_LINK,
   8528 		    ("%s: LINK: Receive sequence error\n",
   8529 		    device_xname(sc->sc_dev)));
   8530 	}
   8531 }
   8532 
   8533 /*
   8534  * wm_linkintr:
   8535  *
   8536  *	Helper; handle link interrupts.
   8537  */
   8538 static void
   8539 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8540 {
   8541 
   8542 	KASSERT(WM_CORE_LOCKED(sc));
   8543 
   8544 	if (sc->sc_flags & WM_F_HAS_MII)
   8545 		wm_linkintr_gmii(sc, icr);
   8546 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8547 	    && (sc->sc_type >= WM_T_82575))
   8548 		wm_linkintr_serdes(sc, icr);
   8549 	else
   8550 		wm_linkintr_tbi(sc, icr);
   8551 }
   8552 
   8553 /*
   8554  * wm_intr_legacy:
   8555  *
   8556  *	Interrupt service routine for INTx and MSI.
   8557  */
   8558 static int
   8559 wm_intr_legacy(void *arg)
   8560 {
   8561 	struct wm_softc *sc = arg;
   8562 	struct wm_queue *wmq = &sc->sc_queue[0];
   8563 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8564 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8565 	uint32_t icr, rndval = 0;
   8566 	int handled = 0;
   8567 
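         	/*
         	 * On these devices reading ICR acknowledges the asserted
         	 * cause bits, so keep looping until no cause we registered
         	 * for (sc_icr) remains.
         	 */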
   8568 	while (1 /* CONSTCOND */) {
   8569 		icr = CSR_READ(sc, WMREG_ICR);
   8570 		if ((icr & sc->sc_icr) == 0)
   8571 			break;
   8572 		if (handled == 0) {
   8573 			DPRINTF(WM_DEBUG_TX,
   8574 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8575 		}
   8576 		if (rndval == 0)
   8577 			rndval = icr;
   8578 
   8579 		mutex_enter(rxq->rxq_lock);
   8580 
   8581 		if (rxq->rxq_stopping) {
   8582 			mutex_exit(rxq->rxq_lock);
   8583 			break;
   8584 		}
   8585 
   8586 		handled = 1;
   8587 
   8588 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8589 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8590 			DPRINTF(WM_DEBUG_RX,
   8591 			    ("%s: RX: got Rx intr 0x%08x\n",
   8592 			    device_xname(sc->sc_dev),
   8593 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8594 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8595 		}
   8596 #endif
   8597 		wm_rxeof(rxq, UINT_MAX);
   8598 
   8599 		mutex_exit(rxq->rxq_lock);
   8600 		mutex_enter(txq->txq_lock);
   8601 
   8602 		if (txq->txq_stopping) {
   8603 			mutex_exit(txq->txq_lock);
   8604 			break;
   8605 		}
   8606 
   8607 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8608 		if (icr & ICR_TXDW) {
   8609 			DPRINTF(WM_DEBUG_TX,
   8610 			    ("%s: TX: got TXDW interrupt\n",
   8611 			    device_xname(sc->sc_dev)));
   8612 			WM_Q_EVCNT_INCR(txq, txdw);
   8613 		}
   8614 #endif
   8615 		wm_txeof(sc, txq);
   8616 
   8617 		mutex_exit(txq->txq_lock);
   8618 		WM_CORE_LOCK(sc);
   8619 
   8620 		if (sc->sc_core_stopping) {
   8621 			WM_CORE_UNLOCK(sc);
   8622 			break;
   8623 		}
   8624 
   8625 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8626 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8627 			wm_linkintr(sc, icr);
   8628 		}
   8629 
   8630 		WM_CORE_UNLOCK(sc);
   8631 
   8632 		if (icr & ICR_RXO) {
   8633 #if defined(WM_DEBUG)
   8634 			log(LOG_WARNING, "%s: Receive overrun\n",
   8635 			    device_xname(sc->sc_dev));
   8636 #endif /* defined(WM_DEBUG) */
   8637 		}
   8638 	}
   8639 
   8640 	rnd_add_uint32(&sc->rnd_source, rndval);
   8641 
   8642 	if (handled) {
   8643 		/* Try to get more packets going. */
   8644 		softint_schedule(wmq->wmq_si);
   8645 	}
   8646 
   8647 	return handled;
   8648 }
   8649 
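         /*
          * Mask/unmask the Tx/Rx interrupts of one queue pair.  The
          * register and bit layout differ by MAC type: 82574 uses IMC/IMS
          * with per-queue ICR bits, 82575 uses EIMC/EIMS with EITR queue
          * bits, and later devices use EIMC/EIMS indexed by the MSI-X
          * vector.
          */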
   8650 static inline void
   8651 wm_txrxintr_disable(struct wm_queue *wmq)
   8652 {
   8653 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8654 
   8655 	if (sc->sc_type == WM_T_82574)
   8656 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8657 	else if (sc->sc_type == WM_T_82575)
   8658 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8659 	else
   8660 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8661 }
   8662 
   8663 static inline void
   8664 wm_txrxintr_enable(struct wm_queue *wmq)
   8665 {
   8666 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8667 
   8668 	wm_itrs_calculate(sc, wmq);
   8669 
   8670 	if (sc->sc_type == WM_T_82574)
   8671 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8672 	else if (sc->sc_type == WM_T_82575)
   8673 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8674 	else
   8675 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8676 }
   8677 
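         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for the Tx/Rx queue pair for MSI-X.
          */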
   8678 static int
   8679 wm_txrxintr_msix(void *arg)
   8680 {
   8681 	struct wm_queue *wmq = arg;
   8682 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8683 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8684 	struct wm_softc *sc = txq->txq_sc;
   8685 	u_int limit = sc->sc_rx_intr_process_limit;
   8686 
   8687 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8688 
   8689 	DPRINTF(WM_DEBUG_TX,
   8690 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8691 
   8692 	wm_txrxintr_disable(wmq);
   8693 
   8694 	mutex_enter(txq->txq_lock);
   8695 
   8696 	if (txq->txq_stopping) {
   8697 		mutex_exit(txq->txq_lock);
   8698 		return 0;
   8699 	}
   8700 
   8701 	WM_Q_EVCNT_INCR(txq, txdw);
   8702 	wm_txeof(sc, txq);
   8703 	/* wm_deferred start() is done in wm_handle_queue(). */
   8704 	mutex_exit(txq->txq_lock);
   8705 
   8706 	DPRINTF(WM_DEBUG_RX,
   8707 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8708 	mutex_enter(rxq->rxq_lock);
   8709 
   8710 	if (rxq->rxq_stopping) {
   8711 		mutex_exit(rxq->rxq_lock);
   8712 		return 0;
   8713 	}
   8714 
   8715 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8716 	wm_rxeof(rxq, limit);
   8717 	mutex_exit(rxq->rxq_lock);
   8718 
   8719 	wm_itrs_writereg(sc, wmq);
   8720 
   8721 	softint_schedule(wmq->wmq_si);
   8722 
   8723 	return 1;
   8724 }
   8725 
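         /*
          * wm_handle_queue:
          *
          *	Softint handler scheduled from the interrupt handlers;
          *	completes Tx, restarts deferred transmission, processes Rx
          *	up to the process limit and re-enables the queue interrupt.
          */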
   8726 static void
   8727 wm_handle_queue(void *arg)
   8728 {
   8729 	struct wm_queue *wmq = arg;
   8730 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8731 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8732 	struct wm_softc *sc = txq->txq_sc;
   8733 	u_int limit = sc->sc_rx_process_limit;
   8734 
   8735 	mutex_enter(txq->txq_lock);
   8736 	if (txq->txq_stopping) {
   8737 		mutex_exit(txq->txq_lock);
   8738 		return;
   8739 	}
   8740 	wm_txeof(sc, txq);
   8741 	wm_deferred_start_locked(txq);
   8742 	mutex_exit(txq->txq_lock);
   8743 
   8744 	mutex_enter(rxq->rxq_lock);
   8745 	if (rxq->rxq_stopping) {
   8746 		mutex_exit(rxq->rxq_lock);
   8747 		return;
   8748 	}
   8749 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8750 	wm_rxeof(rxq, limit);
   8751 	mutex_exit(rxq->rxq_lock);
   8752 
   8753 	wm_txrxintr_enable(wmq);
   8754 }
   8755 
   8756 /*
   8757  * wm_linkintr_msix:
   8758  *
   8759  *	Interrupt service routine for link status change for MSI-X.
   8760  */
   8761 static int
   8762 wm_linkintr_msix(void *arg)
   8763 {
   8764 	struct wm_softc *sc = arg;
   8765 	uint32_t reg;
   8766 
   8767 	DPRINTF(WM_DEBUG_LINK,
   8768 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8769 
   8770 	reg = CSR_READ(sc, WMREG_ICR);
   8771 	WM_CORE_LOCK(sc);
   8772 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8773 		goto out;
   8774 
   8775 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8776 	wm_linkintr(sc, ICR_LSC);
   8777 
   8778 out:
   8779 	WM_CORE_UNLOCK(sc);
   8780 
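         	/*
         	 * Re-enable the link interrupt; as in wm_txrxintr_enable(),
         	 * the mask register and bit layout differ by MAC type.
         	 */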
   8781 	if (sc->sc_type == WM_T_82574)
   8782 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8783 	else if (sc->sc_type == WM_T_82575)
   8784 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8785 	else
   8786 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8787 
   8788 	return 1;
   8789 }
   8790 
   8791 /*
   8792  * Media related.
   8793  * GMII, SGMII, TBI (and SERDES)
   8794  */
   8795 
   8796 /* Common */
   8797 
   8798 /*
   8799  * wm_tbi_serdes_set_linkled:
   8800  *
   8801  *	Update the link LED on TBI and SERDES devices.
   8802  */
   8803 static void
   8804 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8805 {
   8806 
   8807 	if (sc->sc_tbi_linkup)
   8808 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8809 	else
   8810 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8811 
   8812 	/* 82540 or newer devices are active low */
   8813 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8814 
   8815 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8816 }
   8817 
   8818 /* GMII related */
   8819 
   8820 /*
   8821  * wm_gmii_reset:
   8822  *
   8823  *	Reset the PHY.
   8824  */
   8825 static void
   8826 wm_gmii_reset(struct wm_softc *sc)
   8827 {
   8828 	uint32_t reg;
   8829 	int rv;
   8830 
   8831 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8832 		device_xname(sc->sc_dev), __func__));
   8833 
   8834 	rv = sc->phy.acquire(sc);
   8835 	if (rv != 0) {
   8836 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8837 		    __func__);
   8838 		return;
   8839 	}
   8840 
   8841 	switch (sc->sc_type) {
   8842 	case WM_T_82542_2_0:
   8843 	case WM_T_82542_2_1:
   8844 		/* null */
   8845 		break;
   8846 	case WM_T_82543:
   8847 		/*
   8848 		 * With 82543, we need to force speed and duplex on the MAC
   8849 		 * equal to what the PHY speed and duplex configuration is.
   8850 		 * In addition, we need to perform a hardware reset on the PHY
   8851 		 * to take it out of reset.
   8852 		 */
   8853 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8854 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8855 
   8856 		/* The PHY reset pin is active-low. */
   8857 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8858 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8859 		    CTRL_EXT_SWDPIN(4));
   8860 		reg |= CTRL_EXT_SWDPIO(4);
   8861 
   8862 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8863 		CSR_WRITE_FLUSH(sc);
   8864 		delay(10*1000);
   8865 
   8866 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8867 		CSR_WRITE_FLUSH(sc);
   8868 		delay(150);
   8869 #if 0
   8870 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8871 #endif
   8872 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8873 		break;
   8874 	case WM_T_82544:	/* reset 10000us */
   8875 	case WM_T_82540:
   8876 	case WM_T_82545:
   8877 	case WM_T_82545_3:
   8878 	case WM_T_82546:
   8879 	case WM_T_82546_3:
   8880 	case WM_T_82541:
   8881 	case WM_T_82541_2:
   8882 	case WM_T_82547:
   8883 	case WM_T_82547_2:
   8884 	case WM_T_82571:	/* reset 100us */
   8885 	case WM_T_82572:
   8886 	case WM_T_82573:
   8887 	case WM_T_82574:
   8888 	case WM_T_82575:
   8889 	case WM_T_82576:
   8890 	case WM_T_82580:
   8891 	case WM_T_I350:
   8892 	case WM_T_I354:
   8893 	case WM_T_I210:
   8894 	case WM_T_I211:
   8895 	case WM_T_82583:
   8896 	case WM_T_80003:
   8897 		/* generic reset */
   8898 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8899 		CSR_WRITE_FLUSH(sc);
   8900 		delay(20000);
   8901 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8902 		CSR_WRITE_FLUSH(sc);
   8903 		delay(20000);
   8904 
   8905 		if ((sc->sc_type == WM_T_82541)
   8906 		    || (sc->sc_type == WM_T_82541_2)
   8907 		    || (sc->sc_type == WM_T_82547)
   8908 		    || (sc->sc_type == WM_T_82547_2)) {
    8909 			/* Workarounds for IGP are done in igp_reset() */
   8910 			/* XXX add code to set LED after phy reset */
   8911 		}
   8912 		break;
   8913 	case WM_T_ICH8:
   8914 	case WM_T_ICH9:
   8915 	case WM_T_ICH10:
   8916 	case WM_T_PCH:
   8917 	case WM_T_PCH2:
   8918 	case WM_T_PCH_LPT:
   8919 	case WM_T_PCH_SPT:
   8920 		/* generic reset */
   8921 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8922 		CSR_WRITE_FLUSH(sc);
   8923 		delay(100);
   8924 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8925 		CSR_WRITE_FLUSH(sc);
   8926 		delay(150);
   8927 		break;
   8928 	default:
   8929 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8930 		    __func__);
   8931 		break;
   8932 	}
   8933 
   8934 	sc->phy.release(sc);
   8935 
   8936 	/* get_cfg_done */
   8937 	wm_get_cfg_done(sc);
   8938 
   8939 	/* extra setup */
   8940 	switch (sc->sc_type) {
   8941 	case WM_T_82542_2_0:
   8942 	case WM_T_82542_2_1:
   8943 	case WM_T_82543:
   8944 	case WM_T_82544:
   8945 	case WM_T_82540:
   8946 	case WM_T_82545:
   8947 	case WM_T_82545_3:
   8948 	case WM_T_82546:
   8949 	case WM_T_82546_3:
   8950 	case WM_T_82541_2:
   8951 	case WM_T_82547_2:
   8952 	case WM_T_82571:
   8953 	case WM_T_82572:
   8954 	case WM_T_82573:
   8955 	case WM_T_82574:
   8956 	case WM_T_82583:
   8957 	case WM_T_82575:
   8958 	case WM_T_82576:
   8959 	case WM_T_82580:
   8960 	case WM_T_I350:
   8961 	case WM_T_I354:
   8962 	case WM_T_I210:
   8963 	case WM_T_I211:
   8964 	case WM_T_80003:
   8965 		/* null */
   8966 		break;
   8967 	case WM_T_82541:
   8968 	case WM_T_82547:
    8969 		/* XXX Actively configure the LED after PHY reset */
   8970 		break;
   8971 	case WM_T_ICH8:
   8972 	case WM_T_ICH9:
   8973 	case WM_T_ICH10:
   8974 	case WM_T_PCH:
   8975 	case WM_T_PCH2:
   8976 	case WM_T_PCH_LPT:
   8977 	case WM_T_PCH_SPT:
   8978 		wm_phy_post_reset(sc);
   8979 		break;
   8980 	default:
   8981 		panic("%s: unknown type\n", __func__);
   8982 		break;
   8983 	}
   8984 }
   8985 
   8986 /*
    8987  * Set up sc_phytype and mii_{read|write}reg.
    8988  *
    8989  *  To identify the PHY type, the correct read/write function must be
    8990  * selected. To select the correct read/write function, the PCI ID or
    8991  * MAC type is required without accessing PHY registers.
    8992  *
    8993  *  On the first call of this function, the PHY ID is not known yet.
    8994  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    8995  * so the result might be incorrect.
    8996  *
    8997  *  On the second call, the PHY OUI and model are used to identify the
    8998  * PHY type. It might not be perfect because some entries are missing
    8999  * from the comparison, but it should be better than the first call.
    9000  *
    9001  *  If the newly detected result differs from the previous assumption,
    9002  * a diagnostic message is printed.
   9003  */
   9004 static void
   9005 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9006     uint16_t phy_model)
   9007 {
   9008 	device_t dev = sc->sc_dev;
   9009 	struct mii_data *mii = &sc->sc_mii;
   9010 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9011 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9012 	mii_readreg_t new_readreg;
   9013 	mii_writereg_t new_writereg;
   9014 
   9015 	if (mii->mii_readreg == NULL) {
   9016 		/*
   9017 		 *  This is the first call of this function. For ICH and PCH
   9018 		 * variants, it's difficult to determine the PHY access method
   9019 		 * by sc_type, so use the PCI product ID for some devices.
   9020 		 */
   9021 
   9022 		switch (sc->sc_pcidevid) {
   9023 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9024 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9025 			/* 82577 */
   9026 			new_phytype = WMPHY_82577;
   9027 			break;
   9028 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9029 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9030 			/* 82578 */
   9031 			new_phytype = WMPHY_82578;
   9032 			break;
   9033 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9034 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9035 			/* 82579 */
   9036 			new_phytype = WMPHY_82579;
   9037 			break;
   9038 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9039 		case PCI_PRODUCT_INTEL_82801I_BM:
   9040 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9041 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9042 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9043 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9044 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9045 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9046 			/* ICH8, 9, 10 with 82567 */
   9047 			new_phytype = WMPHY_BM;
   9048 			break;
   9049 		default:
   9050 			break;
   9051 		}
   9052 	} else {
   9053 		/* It's not the first call. Use PHY OUI and model */
   9054 		switch (phy_oui) {
   9055 		case MII_OUI_ATHEROS: /* XXX ??? */
   9056 			switch (phy_model) {
   9057 			case 0x0004: /* XXX */
   9058 				new_phytype = WMPHY_82578;
   9059 				break;
   9060 			default:
   9061 				break;
   9062 			}
   9063 			break;
   9064 		case MII_OUI_xxMARVELL:
   9065 			switch (phy_model) {
   9066 			case MII_MODEL_xxMARVELL_I210:
   9067 				new_phytype = WMPHY_I210;
   9068 				break;
   9069 			case MII_MODEL_xxMARVELL_E1011:
   9070 			case MII_MODEL_xxMARVELL_E1000_3:
   9071 			case MII_MODEL_xxMARVELL_E1000_5:
   9072 			case MII_MODEL_xxMARVELL_E1112:
   9073 				new_phytype = WMPHY_M88;
   9074 				break;
   9075 			case MII_MODEL_xxMARVELL_E1149:
   9076 				new_phytype = WMPHY_BM;
   9077 				break;
   9078 			case MII_MODEL_xxMARVELL_E1111:
   9079 			case MII_MODEL_xxMARVELL_I347:
   9080 			case MII_MODEL_xxMARVELL_E1512:
   9081 			case MII_MODEL_xxMARVELL_E1340M:
   9082 			case MII_MODEL_xxMARVELL_E1543:
   9083 				new_phytype = WMPHY_M88;
   9084 				break;
   9085 			case MII_MODEL_xxMARVELL_I82563:
   9086 				new_phytype = WMPHY_GG82563;
   9087 				break;
   9088 			default:
   9089 				break;
   9090 			}
   9091 			break;
   9092 		case MII_OUI_INTEL:
   9093 			switch (phy_model) {
   9094 			case MII_MODEL_INTEL_I82577:
   9095 				new_phytype = WMPHY_82577;
   9096 				break;
   9097 			case MII_MODEL_INTEL_I82579:
   9098 				new_phytype = WMPHY_82579;
   9099 				break;
   9100 			case MII_MODEL_INTEL_I217:
   9101 				new_phytype = WMPHY_I217;
   9102 				break;
   9103 			case MII_MODEL_INTEL_I82580:
   9104 			case MII_MODEL_INTEL_I350:
   9105 				new_phytype = WMPHY_82580;
   9106 				break;
   9107 			default:
   9108 				break;
   9109 			}
   9110 			break;
   9111 		case MII_OUI_yyINTEL:
   9112 			switch (phy_model) {
   9113 			case MII_MODEL_yyINTEL_I82562G:
   9114 			case MII_MODEL_yyINTEL_I82562EM:
   9115 			case MII_MODEL_yyINTEL_I82562ET:
   9116 				new_phytype = WMPHY_IFE;
   9117 				break;
   9118 			case MII_MODEL_yyINTEL_IGP01E1000:
   9119 				new_phytype = WMPHY_IGP;
   9120 				break;
   9121 			case MII_MODEL_yyINTEL_I82566:
   9122 				new_phytype = WMPHY_IGP_3;
   9123 				break;
   9124 			default:
   9125 				break;
   9126 			}
   9127 			break;
   9128 		default:
   9129 			break;
   9130 		}
   9131 		if (new_phytype == WMPHY_UNKNOWN)
   9132 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9133 			    __func__);
   9134 
   9135 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9136 		    && (sc->sc_phytype != new_phytype )) {
   9137 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9138 			    " was incorrect. PHY type from PHY ID = %u\n",
   9139 			    sc->sc_phytype, new_phytype);
   9140 		}
   9141 	}
   9142 
   9143 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9144 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9145 		/* SGMII */
   9146 		new_readreg = wm_sgmii_readreg;
   9147 		new_writereg = wm_sgmii_writereg;
   9148 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9149 		/* BM2 (phyaddr == 1) */
   9150 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9151 		    && (new_phytype != WMPHY_BM)
   9152 		    && (new_phytype != WMPHY_UNKNOWN))
   9153 			doubt_phytype = new_phytype;
   9154 		new_phytype = WMPHY_BM;
   9155 		new_readreg = wm_gmii_bm_readreg;
   9156 		new_writereg = wm_gmii_bm_writereg;
   9157 	} else if (sc->sc_type >= WM_T_PCH) {
   9158 		/* All PCH* use _hv_ */
   9159 		new_readreg = wm_gmii_hv_readreg;
   9160 		new_writereg = wm_gmii_hv_writereg;
   9161 	} else if (sc->sc_type >= WM_T_ICH8) {
   9162 		/* non-82567 ICH8, 9 and 10 */
   9163 		new_readreg = wm_gmii_i82544_readreg;
   9164 		new_writereg = wm_gmii_i82544_writereg;
   9165 	} else if (sc->sc_type >= WM_T_80003) {
   9166 		/* 80003 */
   9167 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9168 		    && (new_phytype != WMPHY_GG82563)
   9169 		    && (new_phytype != WMPHY_UNKNOWN))
   9170 			doubt_phytype = new_phytype;
   9171 		new_phytype = WMPHY_GG82563;
   9172 		new_readreg = wm_gmii_i80003_readreg;
   9173 		new_writereg = wm_gmii_i80003_writereg;
   9174 	} else if (sc->sc_type >= WM_T_I210) {
   9175 		/* I210 and I211 */
   9176 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9177 		    && (new_phytype != WMPHY_I210)
   9178 		    && (new_phytype != WMPHY_UNKNOWN))
   9179 			doubt_phytype = new_phytype;
   9180 		new_phytype = WMPHY_I210;
   9181 		new_readreg = wm_gmii_gs40g_readreg;
   9182 		new_writereg = wm_gmii_gs40g_writereg;
   9183 	} else if (sc->sc_type >= WM_T_82580) {
   9184 		/* 82580, I350 and I354 */
   9185 		new_readreg = wm_gmii_82580_readreg;
   9186 		new_writereg = wm_gmii_82580_writereg;
   9187 	} else if (sc->sc_type >= WM_T_82544) {
    9188 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9189 		new_readreg = wm_gmii_i82544_readreg;
   9190 		new_writereg = wm_gmii_i82544_writereg;
   9191 	} else {
   9192 		new_readreg = wm_gmii_i82543_readreg;
   9193 		new_writereg = wm_gmii_i82543_writereg;
   9194 	}
   9195 
   9196 	if (new_phytype == WMPHY_BM) {
   9197 		/* All BM use _bm_ */
   9198 		new_readreg = wm_gmii_bm_readreg;
   9199 		new_writereg = wm_gmii_bm_writereg;
   9200 	}
   9201 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9202 		/* All PCH* use _hv_ */
   9203 		new_readreg = wm_gmii_hv_readreg;
   9204 		new_writereg = wm_gmii_hv_writereg;
   9205 	}
   9206 
   9207 	/* Diag output */
   9208 	if (doubt_phytype != WMPHY_UNKNOWN)
   9209 		aprint_error_dev(dev, "Assumed new PHY type was "
   9210 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9211 		    new_phytype);
   9212 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9213 	    && (sc->sc_phytype != new_phytype ))
   9214 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9215 		    " was incorrect. New PHY type = %u\n",
   9216 		    sc->sc_phytype, new_phytype);
   9217 
   9218 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9219 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9220 
   9221 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9222 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9223 		    "function was incorrect.\n");
   9224 
   9225 	/* Update now */
   9226 	sc->sc_phytype = new_phytype;
   9227 	mii->mii_readreg = new_readreg;
   9228 	mii->mii_writereg = new_writereg;
   9229 }
   9230 
   9231 /*
   9232  * wm_get_phy_id_82575:
   9233  *
    9234  * Return the PHY ID, or -1 on failure.
   9235  */
   9236 static int
   9237 wm_get_phy_id_82575(struct wm_softc *sc)
   9238 {
   9239 	uint32_t reg;
   9240 	int phyid = -1;
   9241 
   9242 	/* XXX */
   9243 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9244 		return -1;
   9245 
   9246 	if (wm_sgmii_uses_mdio(sc)) {
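         		/*
         		 * The default PHY address is held in MDIC on 8257[56]
         		 * and moved to MDICNFG on 82580 and later.
         		 */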
   9247 		switch (sc->sc_type) {
   9248 		case WM_T_82575:
   9249 		case WM_T_82576:
   9250 			reg = CSR_READ(sc, WMREG_MDIC);
   9251 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9252 			break;
   9253 		case WM_T_82580:
   9254 		case WM_T_I350:
   9255 		case WM_T_I354:
   9256 		case WM_T_I210:
   9257 		case WM_T_I211:
   9258 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9259 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9260 			break;
   9261 		default:
   9262 			return -1;
   9263 		}
   9264 	}
   9265 
   9266 	return phyid;
   9267 }
   9268 
   9269 
   9270 /*
   9271  * wm_gmii_mediainit:
   9272  *
   9273  *	Initialize media for use on 1000BASE-T devices.
   9274  */
   9275 static void
   9276 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9277 {
   9278 	device_t dev = sc->sc_dev;
   9279 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9280 	struct mii_data *mii = &sc->sc_mii;
   9281 	uint32_t reg;
   9282 
   9283 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9284 		device_xname(sc->sc_dev), __func__));
   9285 
   9286 	/* We have GMII. */
   9287 	sc->sc_flags |= WM_F_HAS_MII;
   9288 
   9289 	if (sc->sc_type == WM_T_80003)
   9290 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9291 	else
   9292 		sc->sc_tipg = TIPG_1000T_DFLT;
   9293 
   9294 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9295 	if ((sc->sc_type == WM_T_82580)
   9296 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9297 	    || (sc->sc_type == WM_T_I211)) {
   9298 		reg = CSR_READ(sc, WMREG_PHPM);
   9299 		reg &= ~PHPM_GO_LINK_D;
   9300 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9301 	}
   9302 
   9303 	/*
   9304 	 * Let the chip set speed/duplex on its own based on
   9305 	 * signals from the PHY.
   9306 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9307 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9308 	 */
   9309 	sc->sc_ctrl |= CTRL_SLU;
   9310 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9311 
   9312 	/* Initialize our media structures and probe the GMII. */
   9313 	mii->mii_ifp = ifp;
   9314 
   9315 	/*
    9316 	 * The first call of wm_gmii_setup_phytype. The result might be
   9317 	 * incorrect.
   9318 	 */
   9319 	wm_gmii_setup_phytype(sc, 0, 0);
   9320 
   9321 	mii->mii_statchg = wm_gmii_statchg;
   9322 
    9323 	/* Switch PHY control from SMBus to PCIe */
   9324 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9325 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9326 		wm_smbustopci(sc);
   9327 
   9328 	wm_gmii_reset(sc);
   9329 
   9330 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9331 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9332 	    wm_gmii_mediastatus);
   9333 
   9334 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9335 	    || (sc->sc_type == WM_T_82580)
   9336 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9337 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9338 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9339 			/* Attach only one port */
   9340 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9341 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9342 		} else {
   9343 			int i, id;
   9344 			uint32_t ctrl_ext;
   9345 
   9346 			id = wm_get_phy_id_82575(sc);
   9347 			if (id != -1) {
   9348 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9349 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9350 			}
   9351 			if ((id == -1)
   9352 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9353 				/* Power on sgmii phy if it is disabled */
   9354 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9355 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9356 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9357 				CSR_WRITE_FLUSH(sc);
   9358 				delay(300*1000); /* XXX too long */
   9359 
    9360 				/* Try PHY addresses 1 through 7 */
   9361 				for (i = 1; i < 8; i++)
   9362 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9363 					    0xffffffff, i, MII_OFFSET_ANY,
   9364 					    MIIF_DOPAUSE);
   9365 
   9366 				/* restore previous sfp cage power state */
   9367 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9368 			}
   9369 		}
   9370 	} else {
   9371 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9372 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9373 	}
   9374 
   9375 	/*
   9376 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9377 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9378 	 */
   9379 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9380 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9381 		wm_set_mdio_slow_mode_hv(sc);
   9382 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9383 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9384 	}
   9385 
   9386 	/*
   9387 	 * (For ICH8 variants)
   9388 	 * If PHY detection failed, use BM's r/w function and retry.
   9389 	 */
   9390 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9391 		/* if failed, retry with *_bm_* */
   9392 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9393 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9394 		    sc->sc_phytype);
   9395 		sc->sc_phytype = WMPHY_BM;
   9396 		mii->mii_readreg = wm_gmii_bm_readreg;
   9397 		mii->mii_writereg = wm_gmii_bm_writereg;
   9398 
   9399 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9400 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9401 	}
   9402 
   9403 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9404 		/* No PHY was found */
   9405 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9406 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9407 		sc->sc_phytype = WMPHY_NONE;
   9408 	} else {
   9409 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9410 
   9411 		/*
    9412 		 * PHY found! Check the PHY type again with the second call
    9413 		 * of wm_gmii_setup_phytype.
   9414 		 */
   9415 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9416 		    child->mii_mpd_model);
   9417 
   9418 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9419 	}
   9420 }
   9421 
   9422 /*
   9423  * wm_gmii_mediachange:	[ifmedia interface function]
   9424  *
   9425  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9426  */
   9427 static int
   9428 wm_gmii_mediachange(struct ifnet *ifp)
   9429 {
   9430 	struct wm_softc *sc = ifp->if_softc;
   9431 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9432 	int rc;
   9433 
   9434 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9435 		device_xname(sc->sc_dev), __func__));
   9436 	if ((ifp->if_flags & IFF_UP) == 0)
   9437 		return 0;
   9438 
   9439 	/* Disable D0 LPLU. */
   9440 	wm_lplu_d0_disable(sc);
   9441 
   9442 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9443 	sc->sc_ctrl |= CTRL_SLU;
   9444 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9445 	    || (sc->sc_type > WM_T_82543)) {
   9446 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9447 	} else {
   9448 		sc->sc_ctrl &= ~CTRL_ASDE;
   9449 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9450 		if (ife->ifm_media & IFM_FDX)
   9451 			sc->sc_ctrl |= CTRL_FD;
   9452 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9453 		case IFM_10_T:
   9454 			sc->sc_ctrl |= CTRL_SPEED_10;
   9455 			break;
   9456 		case IFM_100_TX:
   9457 			sc->sc_ctrl |= CTRL_SPEED_100;
   9458 			break;
   9459 		case IFM_1000_T:
   9460 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9461 			break;
   9462 		default:
   9463 			panic("wm_gmii_mediachange: bad media 0x%x",
   9464 			    ife->ifm_media);
   9465 		}
   9466 	}
   9467 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9468 	CSR_WRITE_FLUSH(sc);
   9469 	if (sc->sc_type <= WM_T_82543)
   9470 		wm_gmii_reset(sc);
   9471 
   9472 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9473 		return 0;
   9474 	return rc;
   9475 }
   9476 
   9477 /*
   9478  * wm_gmii_mediastatus:	[ifmedia interface function]
   9479  *
   9480  *	Get the current interface media status on a 1000BASE-T device.
   9481  */
   9482 static void
   9483 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9484 {
   9485 	struct wm_softc *sc = ifp->if_softc;
   9486 
   9487 	ether_mediastatus(ifp, ifmr);
   9488 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9489 	    | sc->sc_flowflags;
   9490 }
   9491 
   9492 #define	MDI_IO		CTRL_SWDPIN(2)
   9493 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9494 #define	MDI_CLK		CTRL_SWDPIN(3)
   9495 
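         /*
          * The 82543 has no MDIC register; MII management frames are
          * bit-banged through software-definable pins: MDI_IO carries the
          * data, MDI_DIR sets its direction and MDI_CLK clocks each bit
          * with roughly 10us half-periods.
          */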
   9496 static void
   9497 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9498 {
   9499 	uint32_t i, v;
   9500 
   9501 	v = CSR_READ(sc, WMREG_CTRL);
   9502 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9503 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9504 
   9505 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9506 		if (data & i)
   9507 			v |= MDI_IO;
   9508 		else
   9509 			v &= ~MDI_IO;
   9510 		CSR_WRITE(sc, WMREG_CTRL, v);
   9511 		CSR_WRITE_FLUSH(sc);
   9512 		delay(10);
   9513 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9514 		CSR_WRITE_FLUSH(sc);
   9515 		delay(10);
   9516 		CSR_WRITE(sc, WMREG_CTRL, v);
   9517 		CSR_WRITE_FLUSH(sc);
   9518 		delay(10);
   9519 	}
   9520 }
   9521 
   9522 static uint32_t
   9523 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9524 {
   9525 	uint32_t v, i, data = 0;
   9526 
   9527 	v = CSR_READ(sc, WMREG_CTRL);
   9528 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9529 	v |= CTRL_SWDPIO(3);
   9530 
   9531 	CSR_WRITE(sc, WMREG_CTRL, v);
   9532 	CSR_WRITE_FLUSH(sc);
   9533 	delay(10);
   9534 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9535 	CSR_WRITE_FLUSH(sc);
   9536 	delay(10);
   9537 	CSR_WRITE(sc, WMREG_CTRL, v);
   9538 	CSR_WRITE_FLUSH(sc);
   9539 	delay(10);
   9540 
   9541 	for (i = 0; i < 16; i++) {
   9542 		data <<= 1;
   9543 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9544 		CSR_WRITE_FLUSH(sc);
   9545 		delay(10);
   9546 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9547 			data |= 1;
   9548 		CSR_WRITE(sc, WMREG_CTRL, v);
   9549 		CSR_WRITE_FLUSH(sc);
   9550 		delay(10);
   9551 	}
   9552 
   9553 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9554 	CSR_WRITE_FLUSH(sc);
   9555 	delay(10);
   9556 	CSR_WRITE(sc, WMREG_CTRL, v);
   9557 	CSR_WRITE_FLUSH(sc);
   9558 	delay(10);
   9559 
   9560 	return data;
   9561 }
   9562 
   9563 #undef MDI_IO
   9564 #undef MDI_DIR
   9565 #undef MDI_CLK
   9566 
   9567 /*
   9568  * wm_gmii_i82543_readreg:	[mii interface function]
   9569  *
   9570  *	Read a PHY register on the GMII (i82543 version).
   9571  */
   9572 static int
   9573 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9574 {
   9575 	struct wm_softc *sc = device_private(self);
   9576 	int rv;
   9577 
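         	/*
         	 * 32 preamble bits, then a 14-bit start/opcode/phy/reg frame;
         	 * recvbits() clocks through the turnaround and reads the 16
         	 * data bits.
         	 */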
   9578 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9579 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9580 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9581 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9582 
   9583 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9584 	    device_xname(sc->sc_dev), phy, reg, rv));
   9585 
   9586 	return rv;
   9587 }
   9588 
   9589 /*
   9590  * wm_gmii_i82543_writereg:	[mii interface function]
   9591  *
   9592  *	Write a PHY register on the GMII (i82543 version).
   9593  */
   9594 static void
   9595 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9596 {
   9597 	struct wm_softc *sc = device_private(self);
   9598 
   9599 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9600 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9601 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9602 	    (MII_COMMAND_START << 30), 32);
   9603 }
   9604 
   9605 /*
   9606  * wm_gmii_mdic_readreg:	[mii interface function]
   9607  *
   9608  *	Read a PHY register on the GMII.
   9609  */
   9610 static int
   9611 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9612 {
   9613 	struct wm_softc *sc = device_private(self);
   9614 	uint32_t mdic = 0;
   9615 	int i, rv;
   9616 
   9617 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9618 	    MDIC_REGADD(reg));
   9619 
   9620 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9621 		mdic = CSR_READ(sc, WMREG_MDIC);
   9622 		if (mdic & MDIC_READY)
   9623 			break;
   9624 		delay(50);
   9625 	}
   9626 
   9627 	if ((mdic & MDIC_READY) == 0) {
   9628 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9629 		    device_xname(sc->sc_dev), phy, reg);
   9630 		rv = 0;
   9631 	} else if (mdic & MDIC_E) {
   9632 #if 0 /* This is normal if no PHY is present. */
   9633 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9634 		    device_xname(sc->sc_dev), phy, reg);
   9635 #endif
   9636 		rv = 0;
   9637 	} else {
   9638 		rv = MDIC_DATA(mdic);
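		/* All-ones data usually means no PHY at this address */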
   9639 		if (rv == 0xffff)
   9640 			rv = 0;
   9641 	}
   9642 
   9643 	return rv;
   9644 }
   9645 
   9646 /*
   9647  * wm_gmii_mdic_writereg:	[mii interface function]
   9648  *
   9649  *	Write a PHY register on the GMII.
   9650  */
   9651 static void
   9652 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9653 {
   9654 	struct wm_softc *sc = device_private(self);
   9655 	uint32_t mdic = 0;
   9656 	int i;
   9657 
   9658 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9659 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9660 
   9661 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9662 		mdic = CSR_READ(sc, WMREG_MDIC);
   9663 		if (mdic & MDIC_READY)
   9664 			break;
   9665 		delay(50);
   9666 	}
   9667 
   9668 	if ((mdic & MDIC_READY) == 0)
   9669 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9670 		    device_xname(sc->sc_dev), phy, reg);
   9671 	else if (mdic & MDIC_E)
   9672 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9673 		    device_xname(sc->sc_dev), phy, reg);
   9674 }
   9675 
   9676 /*
   9677  * wm_gmii_i82544_readreg:	[mii interface function]
   9678  *
   9679  *	Read a PHY register on the GMII.
   9680  */
   9681 static int
   9682 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9683 {
   9684 	struct wm_softc *sc = device_private(self);
   9685 	int rv;
   9686 
   9687 	if (sc->phy.acquire(sc)) {
   9688 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9689 		    __func__);
   9690 		return 0;
   9691 	}
   9692 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9693 	sc->phy.release(sc);
   9694 
   9695 	return rv;
   9696 }
   9697 
   9698 /*
   9699  * wm_gmii_i82544_writereg:	[mii interface function]
   9700  *
   9701  *	Write a PHY register on the GMII.
   9702  */
   9703 static void
   9704 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9705 {
   9706 	struct wm_softc *sc = device_private(self);
   9707 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
   9712 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9713 	sc->phy.release(sc);
   9714 }
   9715 
   9716 /*
   9717  * wm_gmii_i80003_readreg:	[mii interface function]
   9718  *
 *	Read a PHY register on the 80003's kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9722  */
   9723 static int
   9724 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9725 {
   9726 	struct wm_softc *sc = device_private(self);
   9727 	int rv;
   9728 
   9729 	if (phy != 1) /* only one PHY on kumeran bus */
   9730 		return 0;
   9731 
   9732 	if (sc->phy.acquire(sc)) {
   9733 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9734 		    __func__);
   9735 		return 0;
   9736 	}
   9737 
   9738 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9739 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9740 		    reg >> GG82563_PAGE_SHIFT);
   9741 	} else {
   9742 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9743 		    reg >> GG82563_PAGE_SHIFT);
   9744 	}
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   9746 	delay(200);
   9747 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9748 	delay(200);
   9749 	sc->phy.release(sc);
   9750 
   9751 	return rv;
   9752 }
   9753 
   9754 /*
   9755  * wm_gmii_i80003_writereg:	[mii interface function]
   9756  *
 *	Write a PHY register on the 80003's kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9760  */
   9761 static void
   9762 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9763 {
   9764 	struct wm_softc *sc = device_private(self);
   9765 
   9766 	if (phy != 1) /* only one PHY on kumeran bus */
   9767 		return;
   9768 
   9769 	if (sc->phy.acquire(sc)) {
   9770 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9771 		    __func__);
   9772 		return;
   9773 	}
   9774 
   9775 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9776 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9777 		    reg >> GG82563_PAGE_SHIFT);
   9778 	} else {
   9779 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9780 		    reg >> GG82563_PAGE_SHIFT);
   9781 	}
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   9783 	delay(200);
   9784 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9785 	delay(200);
   9786 
   9787 	sc->phy.release(sc);
   9788 }
   9789 
   9790 /*
   9791  * wm_gmii_bm_readreg:	[mii interface function]
   9792  *
 *	Read a PHY register on a BM PHY using paged GMII access.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9796  */
   9797 static int
   9798 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9799 {
   9800 	struct wm_softc *sc = device_private(self);
   9801 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9802 	uint16_t val;
   9803 	int rv;
   9804 
   9805 	if (sc->phy.acquire(sc)) {
   9806 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9807 		    __func__);
   9808 		return 0;
   9809 	}
   9810 
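	/*
	 * Pages >= 768, register 25 on page 0 and register 31 are
	 * accessed at PHY address 1.
	 */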
   9811 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9812 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9813 		    || (reg == 31)) ? 1 : phy;
   9814 	/* Page 800 works differently than the rest so it has its own func */
   9815 	if (page == BM_WUC_PAGE) {
   9816 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9817 		rv = val;
   9818 		goto release;
   9819 	}
   9820 
   9821 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9822 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9823 		    && (sc->sc_type != WM_T_82583))
   9824 			wm_gmii_mdic_writereg(self, phy,
   9825 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9826 		else
   9827 			wm_gmii_mdic_writereg(self, phy,
   9828 			    BME1000_PHY_PAGE_SELECT, page);
   9829 	}
   9830 
   9831 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9832 
   9833 release:
   9834 	sc->phy.release(sc);
   9835 	return rv;
   9836 }
   9837 
   9838 /*
   9839  * wm_gmii_bm_writereg:	[mii interface function]
   9840  *
 *	Write a PHY register on a BM PHY using paged GMII access.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9844  */
   9845 static void
   9846 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9847 {
   9848 	struct wm_softc *sc = device_private(self);
   9849 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9850 
   9851 	if (sc->phy.acquire(sc)) {
   9852 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9853 		    __func__);
   9854 		return;
   9855 	}
   9856 
   9857 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9858 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9859 		    || (reg == 31)) ? 1 : phy;
   9860 	/* Page 800 works differently than the rest so it has its own func */
   9861 	if (page == BM_WUC_PAGE) {
   9862 		uint16_t tmp;
   9863 
   9864 		tmp = val;
   9865 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9866 		goto release;
   9867 	}
   9868 
   9869 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9870 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9871 		    && (sc->sc_type != WM_T_82583))
   9872 			wm_gmii_mdic_writereg(self, phy,
   9873 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9874 		else
   9875 			wm_gmii_mdic_writereg(self, phy,
   9876 			    BME1000_PHY_PAGE_SELECT, page);
   9877 	}
   9878 
   9879 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9880 
   9881 release:
   9882 	sc->phy.release(sc);
   9883 }
   9884 
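/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register
 *	through *val.  The wakeup registers live on page 800 and are
 *	only reachable while the enable bit on page 769 is set, hence
 *	the three-step sequence below.
 */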
   9885 static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
   9887 {
   9888 	struct wm_softc *sc = device_private(self);
   9889 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9890 	uint16_t wuce, reg;
   9891 
   9892 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9893 		device_xname(sc->sc_dev), __func__));
   9894 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9895 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   9897 	}
   9898 
   9899 	/*
   9900 	 * 1) Enable PHY wakeup register first.
   9901 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9902 	 */
   9903 
   9904 	/* Set page 769 */
   9905 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9906 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9907 
   9908 	/* Read WUCE and save it */
   9909 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9910 
   9911 	reg = wuce | BM_WUC_ENABLE_BIT;
   9912 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9913 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9914 
   9915 	/* Select page 800 */
   9916 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9917 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9918 
   9919 	/*
   9920 	 * 2) Access PHY wakeup register.
   9921 	 * See e1000_access_phy_wakeup_reg_bm.
   9922 	 */
   9923 
   9924 	/* Write page 800 */
   9925 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9926 
   9927 	if (rd)
   9928 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9929 	else
   9930 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9931 
   9932 	/*
   9933 	 * 3) Disable PHY wakeup register.
   9934 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9935 	 */
   9936 	/* Set page 769 */
   9937 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9938 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9939 
   9940 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9941 }
   9942 
   9943 /*
   9944  * wm_gmii_hv_readreg:	[mii interface function]
   9945  *
 *	Read a PHY register on an HV (PCH family) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9949  */
   9950 static int
   9951 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9952 {
   9953 	struct wm_softc *sc = device_private(self);
   9954 	int rv;
   9955 
   9956 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9957 		device_xname(sc->sc_dev), __func__));
   9958 	if (sc->phy.acquire(sc)) {
   9959 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9960 		    __func__);
   9961 		return 0;
   9962 	}
   9963 
   9964 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9965 	sc->phy.release(sc);
   9966 	return rv;
   9967 }
   9968 
   9969 static int
   9970 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9971 {
   9972 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9973 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9974 	uint16_t val;
   9975 	int rv;
   9976 
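	/* Registers on pages >= HV_INTC_FC_PAGE_START sit at PHY address 1 */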
   9977 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9978 
   9979 	/* Page 800 works differently than the rest so it has its own func */
   9980 	if (page == BM_WUC_PAGE) {
   9981 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9982 		return val;
   9983 	}
   9984 
	/*
	 * Pages 1 through 767 work differently than the rest and would
	 * need their own handling, which is not implemented yet.
	 */
   9989 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9990 		printf("gmii_hv_readreg!!!\n");
   9991 		return 0;
   9992 	}
   9993 
   9994 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9995 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9996 		    page << BME1000_PAGE_SHIFT);
   9997 	}
   9998 
   9999 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   10000 	return rv;
   10001 }
   10002 
   10003 /*
   10004  * wm_gmii_hv_writereg:	[mii interface function]
   10005  *
 *	Write a PHY register on an HV (PCH family) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10009  */
   10010 static void
   10011 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   10012 {
   10013 	struct wm_softc *sc = device_private(self);
   10014 
   10015 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10016 		device_xname(sc->sc_dev), __func__));
   10017 
   10018 	if (sc->phy.acquire(sc)) {
   10019 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10020 		    __func__);
   10021 		return;
   10022 	}
   10023 
   10024 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   10025 	sc->phy.release(sc);
   10026 }
   10027 
   10028 static void
   10029 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   10030 {
   10031 	struct wm_softc *sc = device_private(self);
   10032 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10033 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10034 
   10035 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10036 
   10037 	/* Page 800 works differently than the rest so it has its own func */
   10038 	if (page == BM_WUC_PAGE) {
   10039 		uint16_t tmp;
   10040 
   10041 		tmp = val;
   10042 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   10043 		return;
   10044 	}
   10045 
	/*
	 * Pages 1 through 767 work differently than the rest and would
	 * need their own handling, which is not implemented yet.
	 */
   10050 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10051 		printf("gmii_hv_writereg!!!\n");
   10052 		return;
   10053 	}
   10054 
	/*
	 * XXX Work around MDIO accesses being disabled after entering
	 * IEEE Power Down (whenever bit 11 of the PHY control register
	 * is set).
	 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev >= 1)
		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
		    && ((val & (1 << 11)) != 0)) {
			printf("XXX need workaround\n");
		}
	}

	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
		    page << BME1000_PAGE_SHIFT);
	}
   10077 
   10078 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   10079 }
   10080 
   10081 /*
   10082  * wm_gmii_82580_readreg:	[mii interface function]
   10083  *
   10084  *	Read a PHY register on the 82580 and I350.
   10085  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10087  */
   10088 static int
   10089 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   10090 {
   10091 	struct wm_softc *sc = device_private(self);
   10092 	int rv;
   10093 
   10094 	if (sc->phy.acquire(sc) != 0) {
   10095 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10096 		    __func__);
   10097 		return 0;
   10098 	}
   10099 
   10100 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   10101 
   10102 	sc->phy.release(sc);
   10103 	return rv;
   10104 }
   10105 
   10106 /*
   10107  * wm_gmii_82580_writereg:	[mii interface function]
   10108  *
   10109  *	Write a PHY register on the 82580 and I350.
   10110  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10112  */
   10113 static void
   10114 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   10115 {
   10116 	struct wm_softc *sc = device_private(self);
   10117 
   10118 	if (sc->phy.acquire(sc) != 0) {
   10119 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10120 		    __func__);
   10121 		return;
   10122 	}
   10123 
   10124 	wm_gmii_mdic_writereg(self, phy, reg, val);
   10125 
   10126 	sc->phy.release(sc);
   10127 }
   10128 
   10129 /*
   10130  * wm_gmii_gs40g_readreg:	[mii interface function]
   10131  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10135  */
   10136 static int
   10137 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   10138 {
   10139 	struct wm_softc *sc = device_private(self);
   10140 	int page, offset;
   10141 	int rv;
   10142 
   10143 	/* Acquire semaphore */
   10144 	if (sc->phy.acquire(sc)) {
   10145 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10146 		    __func__);
   10147 		return 0;
   10148 	}
   10149 
   10150 	/* Page select */
   10151 	page = reg >> GS40G_PAGE_SHIFT;
   10152 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10153 
   10154 	/* Read reg */
   10155 	offset = reg & GS40G_OFFSET_MASK;
   10156 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10157 
   10158 	sc->phy.release(sc);
   10159 	return rv;
   10160 }
   10161 
   10162 /*
   10163  * wm_gmii_gs40g_writereg:	[mii interface function]
   10164  *
   10165  *	Write a PHY register on the I210 and I211.
   10166  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10168  */
   10169 static void
   10170 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10171 {
   10172 	struct wm_softc *sc = device_private(self);
   10173 	int page, offset;
   10174 
   10175 	/* Acquire semaphore */
   10176 	if (sc->phy.acquire(sc)) {
   10177 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10178 		    __func__);
   10179 		return;
   10180 	}
   10181 
   10182 	/* Page select */
   10183 	page = reg >> GS40G_PAGE_SHIFT;
   10184 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10185 
   10186 	/* Write reg */
   10187 	offset = reg & GS40G_OFFSET_MASK;
   10188 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10189 
   10190 	/* Release semaphore */
   10191 	sc->phy.release(sc);
   10192 }
   10193 
   10194 /*
   10195  * wm_gmii_statchg:	[mii interface function]
   10196  *
   10197  *	Callback from MII layer when media changes.
   10198  */
   10199 static void
   10200 wm_gmii_statchg(struct ifnet *ifp)
   10201 {
   10202 	struct wm_softc *sc = ifp->if_softc;
   10203 	struct mii_data *mii = &sc->sc_mii;
   10204 
   10205 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10206 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10207 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10208 
   10209 	/*
   10210 	 * Get flow control negotiation result.
   10211 	 */
   10212 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10213 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10214 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10215 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10216 	}
   10217 
   10218 	if (sc->sc_flowflags & IFM_FLOW) {
   10219 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10220 			sc->sc_ctrl |= CTRL_TFCE;
   10221 			sc->sc_fcrtl |= FCRTL_XONE;
   10222 		}
   10223 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10224 			sc->sc_ctrl |= CTRL_RFCE;
   10225 	}
   10226 
   10227 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10228 		DPRINTF(WM_DEBUG_LINK,
   10229 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10230 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10231 	} else {
   10232 		DPRINTF(WM_DEBUG_LINK,
   10233 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10234 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10235 	}
   10236 
   10237 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10238 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10239 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10240 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10241 	if (sc->sc_type == WM_T_80003) {
   10242 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10243 		case IFM_1000_T:
   10244 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10245 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10246 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10247 			break;
   10248 		default:
   10249 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10250 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10251 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10252 			break;
   10253 		}
   10254 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10255 	}
   10256 }
   10257 
   10258 /* kumeran related (80003, ICH* and PCH*) */
   10259 
   10260 /*
   10261  * wm_kmrn_readreg:
   10262  *
   10263  *	Read a kumeran register
   10264  */
   10265 static int
   10266 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10267 {
   10268 	int rv;
   10269 
   10270 	if (sc->sc_type == WM_T_80003)
   10271 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10272 	else
   10273 		rv = sc->phy.acquire(sc);
   10274 	if (rv != 0) {
   10275 		aprint_error_dev(sc->sc_dev,
   10276 		    "%s: failed to get semaphore\n", __func__);
   10277 		return 0;
   10278 	}
   10279 
   10280 	rv = wm_kmrn_readreg_locked(sc, reg);
   10281 
   10282 	if (sc->sc_type == WM_T_80003)
   10283 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10284 	else
   10285 		sc->phy.release(sc);
   10286 
   10287 	return rv;
   10288 }
   10289 
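/*
 * The KUMCTRLSTA register multiplexes all kumeran register accesses:
 * the register offset goes into the OFFSET field, setting REN triggers
 * a read, and the 16 bits of data travel in the low bits of the
 * register (see wm_kmrn_writereg_locked() for the write side).
 */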
   10290 static int
   10291 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10292 {
   10293 	int rv;
   10294 
   10295 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10296 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10297 	    KUMCTRLSTA_REN);
   10298 	CSR_WRITE_FLUSH(sc);
   10299 	delay(2);
   10300 
   10301 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10302 
   10303 	return rv;
   10304 }
   10305 
   10306 /*
   10307  * wm_kmrn_writereg:
   10308  *
   10309  *	Write a kumeran register
   10310  */
   10311 static void
   10312 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10313 {
   10314 	int rv;
   10315 
   10316 	if (sc->sc_type == WM_T_80003)
   10317 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10318 	else
   10319 		rv = sc->phy.acquire(sc);
   10320 	if (rv != 0) {
   10321 		aprint_error_dev(sc->sc_dev,
   10322 		    "%s: failed to get semaphore\n", __func__);
   10323 		return;
   10324 	}
   10325 
   10326 	wm_kmrn_writereg_locked(sc, reg, val);
   10327 
   10328 	if (sc->sc_type == WM_T_80003)
   10329 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10330 	else
   10331 		sc->phy.release(sc);
   10332 }
   10333 
   10334 static void
   10335 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10336 {
   10337 
   10338 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10339 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10340 	    (val & KUMCTRLSTA_MASK));
   10341 }
   10342 
   10343 /* SGMII related */
   10344 
   10345 /*
   10346  * wm_sgmii_uses_mdio
   10347  *
   10348  * Check whether the transaction is to the internal PHY or the external
   10349  * MDIO interface. Return true if it's MDIO.
   10350  */
   10351 static bool
   10352 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10353 {
   10354 	uint32_t reg;
   10355 	bool ismdio = false;
   10356 
   10357 	switch (sc->sc_type) {
   10358 	case WM_T_82575:
   10359 	case WM_T_82576:
   10360 		reg = CSR_READ(sc, WMREG_MDIC);
   10361 		ismdio = ((reg & MDIC_DEST) != 0);
   10362 		break;
   10363 	case WM_T_82580:
   10364 	case WM_T_I350:
   10365 	case WM_T_I354:
   10366 	case WM_T_I210:
   10367 	case WM_T_I211:
   10368 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10369 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10370 		break;
   10371 	default:
   10372 		break;
   10373 	}
   10374 
   10375 	return ismdio;
   10376 }
   10377 
   10378 /*
   10379  * wm_sgmii_readreg:	[mii interface function]
   10380  *
   10381  *	Read a PHY register on the SGMII
   10382  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10384  */
   10385 static int
   10386 wm_sgmii_readreg(device_t self, int phy, int reg)
   10387 {
   10388 	struct wm_softc *sc = device_private(self);
   10389 	uint32_t i2ccmd;
   10390 	int i, rv;
   10391 
   10392 	if (sc->phy.acquire(sc)) {
   10393 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10394 		    __func__);
   10395 		return 0;
   10396 	}
   10397 
   10398 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10399 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10400 	    | I2CCMD_OPCODE_READ;
   10401 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10402 
   10403 	/* Poll the ready bit */
   10404 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10405 		delay(50);
   10406 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10407 		if (i2ccmd & I2CCMD_READY)
   10408 			break;
   10409 	}
   10410 	if ((i2ccmd & I2CCMD_READY) == 0)
   10411 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10412 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10413 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10414 
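	/* Swap the data bytes back from I2C order, mirroring the write side */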
   10415 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10416 
   10417 	sc->phy.release(sc);
   10418 	return rv;
   10419 }
   10420 
   10421 /*
   10422  * wm_sgmii_writereg:	[mii interface function]
   10423  *
   10424  *	Write a PHY register on the SGMII.
   10425  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10427  */
   10428 static void
   10429 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10430 {
   10431 	struct wm_softc *sc = device_private(self);
   10432 	uint32_t i2ccmd;
   10433 	int i;
   10434 	int val_swapped;
   10435 
   10436 	if (sc->phy.acquire(sc) != 0) {
   10437 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10438 		    __func__);
   10439 		return;
   10440 	}
   10441 	/* Swap the data bytes for the I2C interface */
   10442 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10443 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10444 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10445 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10446 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10447 
   10448 	/* Poll the ready bit */
   10449 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10450 		delay(50);
   10451 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10452 		if (i2ccmd & I2CCMD_READY)
   10453 			break;
   10454 	}
   10455 	if ((i2ccmd & I2CCMD_READY) == 0)
   10456 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10457 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10458 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10459 
   10460 	sc->phy.release(sc);
   10461 }
   10462 
   10463 /* TBI related */
   10464 
   10465 /*
   10466  * wm_tbi_mediainit:
   10467  *
   10468  *	Initialize media for use on 1000BASE-X devices.
   10469  */
   10470 static void
   10471 wm_tbi_mediainit(struct wm_softc *sc)
   10472 {
   10473 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10474 	const char *sep = "";
   10475 
   10476 	if (sc->sc_type < WM_T_82543)
   10477 		sc->sc_tipg = TIPG_WM_DFLT;
   10478 	else
   10479 		sc->sc_tipg = TIPG_LG_DFLT;
   10480 
   10481 	sc->sc_tbi_serdes_anegticks = 5;
   10482 
   10483 	/* Initialize our media structures */
   10484 	sc->sc_mii.mii_ifp = ifp;
   10485 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10486 
   10487 	if ((sc->sc_type >= WM_T_82575)
   10488 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10489 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10490 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10491 	else
   10492 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10493 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10494 
   10495 	/*
   10496 	 * SWD Pins:
   10497 	 *
   10498 	 *	0 = Link LED (output)
   10499 	 *	1 = Loss Of Signal (input)
   10500 	 */
   10501 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10502 
   10503 	/* XXX Perhaps this is only for TBI */
   10504 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10505 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10506 
   10507 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10508 		sc->sc_ctrl &= ~CTRL_LRST;
   10509 
   10510 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10511 
   10512 #define	ADD(ss, mm, dd)							\
   10513 do {									\
   10514 	aprint_normal("%s%s", sep, ss);					\
   10515 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10516 	sep = ", ";							\
   10517 } while (/*CONSTCOND*/0)
   10518 
   10519 	aprint_normal_dev(sc->sc_dev, "");
   10520 
   10521 	if (sc->sc_type == WM_T_I354) {
   10522 		uint32_t status;
   10523 
   10524 		status = CSR_READ(sc, WMREG_STATUS);
   10525 		if (((status & STATUS_2P5_SKU) != 0)
   10526 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10527 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10528 		} else
   10529 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10530 	} else if (sc->sc_type == WM_T_82545) {
   10531 		/* Only 82545 is LX (XXX except SFP) */
   10532 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10533 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10534 	} else {
   10535 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10536 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10537 	}
   10538 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10539 	aprint_normal("\n");
   10540 
   10541 #undef ADD
   10542 
   10543 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10544 }
   10545 
   10546 /*
   10547  * wm_tbi_mediachange:	[ifmedia interface function]
   10548  *
   10549  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10550  */
   10551 static int
   10552 wm_tbi_mediachange(struct ifnet *ifp)
   10553 {
   10554 	struct wm_softc *sc = ifp->if_softc;
   10555 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10556 	uint32_t status;
   10557 	int i;
   10558 
   10559 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10560 		/* XXX need some work for >= 82571 and < 82575 */
   10561 		if (sc->sc_type < WM_T_82575)
   10562 			return 0;
   10563 	}
   10564 
   10565 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10566 	    || (sc->sc_type >= WM_T_82575))
   10567 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10568 
   10569 	sc->sc_ctrl &= ~CTRL_LRST;
   10570 	sc->sc_txcw = TXCW_ANE;
   10571 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10572 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10573 	else if (ife->ifm_media & IFM_FDX)
   10574 		sc->sc_txcw |= TXCW_FD;
   10575 	else
   10576 		sc->sc_txcw |= TXCW_HD;
   10577 
   10578 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10579 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10580 
   10581 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10582 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10583 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10584 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10585 	CSR_WRITE_FLUSH(sc);
   10586 	delay(1000);
   10587 
   10588 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10589 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10590 
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal, and 0 if they don't.
	 */
   10595 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10596 		/* Have signal; wait for the link to come up. */
   10597 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10598 			delay(10000);
   10599 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10600 				break;
   10601 		}
   10602 
   10603 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10604 			    device_xname(sc->sc_dev),i));
   10605 
   10606 		status = CSR_READ(sc, WMREG_STATUS);
   10607 		DPRINTF(WM_DEBUG_LINK,
   10608 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10609 			device_xname(sc->sc_dev),status, STATUS_LU));
   10610 		if (status & STATUS_LU) {
   10611 			/* Link is up. */
   10612 			DPRINTF(WM_DEBUG_LINK,
   10613 			    ("%s: LINK: set media -> link up %s\n",
   10614 			    device_xname(sc->sc_dev),
   10615 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10616 
			/*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so re-read CTRL into sc->sc_ctrl.
			 */
   10621 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10622 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10623 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10624 			if (status & STATUS_FD)
   10625 				sc->sc_tctl |=
   10626 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10627 			else
   10628 				sc->sc_tctl |=
   10629 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10630 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10631 				sc->sc_fcrtl |= FCRTL_XONE;
   10632 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10633 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10634 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10635 				      sc->sc_fcrtl);
   10636 			sc->sc_tbi_linkup = 1;
   10637 		} else {
   10638 			if (i == WM_LINKUP_TIMEOUT)
   10639 				wm_check_for_link(sc);
   10640 			/* Link is down. */
   10641 			DPRINTF(WM_DEBUG_LINK,
   10642 			    ("%s: LINK: set media -> link down\n",
   10643 			    device_xname(sc->sc_dev)));
   10644 			sc->sc_tbi_linkup = 0;
   10645 		}
   10646 	} else {
   10647 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10648 		    device_xname(sc->sc_dev)));
   10649 		sc->sc_tbi_linkup = 0;
   10650 	}
   10651 
   10652 	wm_tbi_serdes_set_linkled(sc);
   10653 
   10654 	return 0;
   10655 }
   10656 
   10657 /*
   10658  * wm_tbi_mediastatus:	[ifmedia interface function]
   10659  *
   10660  *	Get the current interface media status on a 1000BASE-X device.
   10661  */
   10662 static void
   10663 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10664 {
   10665 	struct wm_softc *sc = ifp->if_softc;
   10666 	uint32_t ctrl, status;
   10667 
   10668 	ifmr->ifm_status = IFM_AVALID;
   10669 	ifmr->ifm_active = IFM_ETHER;
   10670 
   10671 	status = CSR_READ(sc, WMREG_STATUS);
   10672 	if ((status & STATUS_LU) == 0) {
   10673 		ifmr->ifm_active |= IFM_NONE;
   10674 		return;
   10675 	}
   10676 
   10677 	ifmr->ifm_status |= IFM_ACTIVE;
   10678 	/* Only 82545 is LX */
   10679 	if (sc->sc_type == WM_T_82545)
   10680 		ifmr->ifm_active |= IFM_1000_LX;
   10681 	else
   10682 		ifmr->ifm_active |= IFM_1000_SX;
   10683 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10684 		ifmr->ifm_active |= IFM_FDX;
   10685 	else
   10686 		ifmr->ifm_active |= IFM_HDX;
   10687 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10688 	if (ctrl & CTRL_RFCE)
   10689 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10690 	if (ctrl & CTRL_TFCE)
   10691 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10692 }
   10693 
   10694 /* XXX TBI only */
   10695 static int
   10696 wm_check_for_link(struct wm_softc *sc)
   10697 {
   10698 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10699 	uint32_t rxcw;
   10700 	uint32_t ctrl;
   10701 	uint32_t status;
   10702 	uint32_t sig;
   10703 
   10704 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10705 		/* XXX need some work for >= 82571 */
   10706 		if (sc->sc_type >= WM_T_82571) {
   10707 			sc->sc_tbi_linkup = 1;
   10708 			return 0;
   10709 		}
   10710 	}
   10711 
   10712 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10713 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10714 	status = CSR_READ(sc, WMREG_STATUS);
   10715 
   10716 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10717 
   10718 	DPRINTF(WM_DEBUG_LINK,
   10719 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10720 		device_xname(sc->sc_dev), __func__,
   10721 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10722 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10723 
   10724 	/*
   10725 	 * SWDPIN   LU RXCW
   10726 	 *      0    0    0
   10727 	 *      0    0    1	(should not happen)
   10728 	 *      0    1    0	(should not happen)
   10729 	 *      0    1    1	(should not happen)
   10730 	 *      1    0    0	Disable autonego and force linkup
   10731 	 *      1    0    1	got /C/ but not linkup yet
   10732 	 *      1    1    0	(linkup)
   10733 	 *      1    1    1	If IFM_AUTO, back to autonego
   10734 	 *
   10735 	 */
   10736 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10737 	    && ((status & STATUS_LU) == 0)
   10738 	    && ((rxcw & RXCW_C) == 0)) {
   10739 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10740 			__func__));
   10741 		sc->sc_tbi_linkup = 0;
   10742 		/* Disable auto-negotiation in the TXCW register */
   10743 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10744 
   10745 		/*
   10746 		 * Force link-up and also force full-duplex.
   10747 		 *
		 * NOTE: the hardware updated TFCE and RFCE in CTRL
		 * automatically, so we must update sc->sc_ctrl.
   10750 		 */
   10751 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10752 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10753 	} else if (((status & STATUS_LU) != 0)
   10754 	    && ((rxcw & RXCW_C) != 0)
   10755 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10756 		sc->sc_tbi_linkup = 1;
   10757 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10758 			__func__));
   10759 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10760 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10761 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10762 	    && ((rxcw & RXCW_C) != 0)) {
   10763 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10764 	} else {
   10765 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10766 			status));
   10767 	}
   10768 
   10769 	return 0;
   10770 }
   10771 
   10772 /*
   10773  * wm_tbi_tick:
   10774  *
   10775  *	Check the link on TBI devices.
   10776  *	This function acts as mii_tick().
   10777  */
   10778 static void
   10779 wm_tbi_tick(struct wm_softc *sc)
   10780 {
   10781 	struct mii_data *mii = &sc->sc_mii;
   10782 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10783 	uint32_t status;
   10784 
   10785 	KASSERT(WM_CORE_LOCKED(sc));
   10786 
   10787 	status = CSR_READ(sc, WMREG_STATUS);
   10788 
   10789 	/* XXX is this needed? */
   10790 	(void)CSR_READ(sc, WMREG_RXCW);
   10791 	(void)CSR_READ(sc, WMREG_CTRL);
   10792 
   10793 	/* set link status */
   10794 	if ((status & STATUS_LU) == 0) {
   10795 		DPRINTF(WM_DEBUG_LINK,
   10796 		    ("%s: LINK: checklink -> down\n",
   10797 			device_xname(sc->sc_dev)));
   10798 		sc->sc_tbi_linkup = 0;
   10799 	} else if (sc->sc_tbi_linkup == 0) {
   10800 		DPRINTF(WM_DEBUG_LINK,
   10801 		    ("%s: LINK: checklink -> up %s\n",
   10802 			device_xname(sc->sc_dev),
   10803 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10804 		sc->sc_tbi_linkup = 1;
   10805 		sc->sc_tbi_serdes_ticks = 0;
   10806 	}
   10807 
   10808 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10809 		goto setled;
   10810 
   10811 	if ((status & STATUS_LU) == 0) {
   10812 		sc->sc_tbi_linkup = 0;
   10813 		/* If the timer expired, retry autonegotiation */
   10814 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10815 		    && (++sc->sc_tbi_serdes_ticks
   10816 			>= sc->sc_tbi_serdes_anegticks)) {
   10817 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10818 			sc->sc_tbi_serdes_ticks = 0;
   10819 			/*
   10820 			 * Reset the link, and let autonegotiation do
   10821 			 * its thing
   10822 			 */
   10823 			sc->sc_ctrl |= CTRL_LRST;
   10824 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10825 			CSR_WRITE_FLUSH(sc);
   10826 			delay(1000);
   10827 			sc->sc_ctrl &= ~CTRL_LRST;
   10828 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10829 			CSR_WRITE_FLUSH(sc);
   10830 			delay(1000);
   10831 			CSR_WRITE(sc, WMREG_TXCW,
   10832 			    sc->sc_txcw & ~TXCW_ANE);
   10833 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10834 		}
   10835 	}
   10836 
   10837 setled:
   10838 	wm_tbi_serdes_set_linkled(sc);
   10839 }
   10840 
   10841 /* SERDES related */
   10842 static void
   10843 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10844 {
   10845 	uint32_t reg;
   10846 
   10847 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10848 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10849 		return;
   10850 
   10851 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10852 	reg |= PCS_CFG_PCS_EN;
   10853 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10854 
   10855 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10856 	reg &= ~CTRL_EXT_SWDPIN(3);
   10857 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10858 	CSR_WRITE_FLUSH(sc);
   10859 }
   10860 
   10861 static int
   10862 wm_serdes_mediachange(struct ifnet *ifp)
   10863 {
   10864 	struct wm_softc *sc = ifp->if_softc;
   10865 	bool pcs_autoneg = true; /* XXX */
   10866 	uint32_t ctrl_ext, pcs_lctl, reg;
   10867 
   10868 	/* XXX Currently, this function is not called on 8257[12] */
   10869 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10870 	    || (sc->sc_type >= WM_T_82575))
   10871 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10872 
   10873 	wm_serdes_power_up_link_82575(sc);
   10874 
   10875 	sc->sc_ctrl |= CTRL_SLU;
   10876 
   10877 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10878 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10879 
   10880 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10881 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10882 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10883 	case CTRL_EXT_LINK_MODE_SGMII:
   10884 		pcs_autoneg = true;
   10885 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10886 		break;
   10887 	case CTRL_EXT_LINK_MODE_1000KX:
   10888 		pcs_autoneg = false;
   10889 		/* FALLTHROUGH */
   10890 	default:
   10891 		if ((sc->sc_type == WM_T_82575)
   10892 		    || (sc->sc_type == WM_T_82576)) {
   10893 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10894 				pcs_autoneg = false;
   10895 		}
   10896 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10897 		    | CTRL_FRCFDX;
   10898 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10899 	}
   10900 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10901 
   10902 	if (pcs_autoneg) {
   10903 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10904 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10905 
   10906 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10907 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10908 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10909 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10910 	} else
   10911 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10912 
   10913 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10914 
   10916 	return 0;
   10917 }
   10918 
   10919 static void
   10920 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10921 {
   10922 	struct wm_softc *sc = ifp->if_softc;
   10923 	struct mii_data *mii = &sc->sc_mii;
   10924 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10925 	uint32_t pcs_adv, pcs_lpab, reg;
   10926 
   10927 	ifmr->ifm_status = IFM_AVALID;
   10928 	ifmr->ifm_active = IFM_ETHER;
   10929 
   10930 	/* Check PCS */
   10931 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10932 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10933 		ifmr->ifm_active |= IFM_NONE;
   10934 		sc->sc_tbi_linkup = 0;
   10935 		goto setled;
   10936 	}
   10937 
   10938 	sc->sc_tbi_linkup = 1;
   10939 	ifmr->ifm_status |= IFM_ACTIVE;
   10940 	if (sc->sc_type == WM_T_I354) {
   10941 		uint32_t status;
   10942 
   10943 		status = CSR_READ(sc, WMREG_STATUS);
   10944 		if (((status & STATUS_2P5_SKU) != 0)
   10945 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10946 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10947 		} else
   10948 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10949 	} else {
   10950 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10951 		case PCS_LSTS_SPEED_10:
   10952 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10953 			break;
   10954 		case PCS_LSTS_SPEED_100:
   10955 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10956 			break;
   10957 		case PCS_LSTS_SPEED_1000:
   10958 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10959 			break;
   10960 		default:
   10961 			device_printf(sc->sc_dev, "Unknown speed\n");
   10962 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10963 			break;
   10964 		}
   10965 	}
   10966 	if ((reg & PCS_LSTS_FDX) != 0)
   10967 		ifmr->ifm_active |= IFM_FDX;
   10968 	else
   10969 		ifmr->ifm_active |= IFM_HDX;
   10970 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10971 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10972 		/* Check flow */
   10973 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10974 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10975 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10976 			goto setled;
   10977 		}
   10978 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10979 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10980 		DPRINTF(WM_DEBUG_LINK,
   10981 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
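		/*
		 * Resolve the TX/RX pause configuration from the
		 * advertised (PCS_ANADV) and link-partner (PCS_LPAB)
		 * symmetric/asymmetric pause bits, following the
		 * priority resolution of IEEE 802.3 annex 28B.
		 */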
   10982 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10983 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10984 			mii->mii_media_active |= IFM_FLOW
   10985 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10986 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10987 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10988 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10989 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10990 			mii->mii_media_active |= IFM_FLOW
   10991 			    | IFM_ETH_TXPAUSE;
   10992 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10993 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10994 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10995 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10996 			mii->mii_media_active |= IFM_FLOW
   10997 			    | IFM_ETH_RXPAUSE;
   10998 		}
   10999 	}
   11000 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11001 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11002 setled:
   11003 	wm_tbi_serdes_set_linkled(sc);
   11004 }
   11005 
   11006 /*
   11007  * wm_serdes_tick:
   11008  *
   11009  *	Check the link on serdes devices.
   11010  */
   11011 static void
   11012 wm_serdes_tick(struct wm_softc *sc)
   11013 {
   11014 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11015 	struct mii_data *mii = &sc->sc_mii;
   11016 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11017 	uint32_t reg;
   11018 
   11019 	KASSERT(WM_CORE_LOCKED(sc));
   11020 
   11021 	mii->mii_media_status = IFM_AVALID;
   11022 	mii->mii_media_active = IFM_ETHER;
   11023 
   11024 	/* Check PCS */
   11025 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11026 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11027 		mii->mii_media_status |= IFM_ACTIVE;
   11028 		sc->sc_tbi_linkup = 1;
   11029 		sc->sc_tbi_serdes_ticks = 0;
   11030 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11031 		if ((reg & PCS_LSTS_FDX) != 0)
   11032 			mii->mii_media_active |= IFM_FDX;
   11033 		else
   11034 			mii->mii_media_active |= IFM_HDX;
   11035 	} else {
   11036 		mii->mii_media_status |= IFM_NONE;
   11037 		sc->sc_tbi_linkup = 0;
   11038 		/* If the timer expired, retry autonegotiation */
   11039 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11040 		    && (++sc->sc_tbi_serdes_ticks
   11041 			>= sc->sc_tbi_serdes_anegticks)) {
   11042 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11043 			sc->sc_tbi_serdes_ticks = 0;
   11044 			/* XXX */
   11045 			wm_serdes_mediachange(ifp);
   11046 		}
   11047 	}
   11048 
   11049 	wm_tbi_serdes_set_linkled(sc);
   11050 }
   11051 
   11052 /* SFP related */
   11053 
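/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte of SFP module data at "offset" through the chip's
 *	I2CCMD interface.  Returns 0 on success, -1 on timeout or I2C
 *	error.
 */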
   11054 static int
   11055 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11056 {
   11057 	uint32_t i2ccmd;
   11058 	int i;
   11059 
   11060 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11061 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11062 
   11063 	/* Poll the ready bit */
   11064 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11065 		delay(50);
   11066 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11067 		if (i2ccmd & I2CCMD_READY)
   11068 			break;
   11069 	}
   11070 	if ((i2ccmd & I2CCMD_READY) == 0)
   11071 		return -1;
   11072 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11073 		return -1;
   11074 
   11075 	*data = i2ccmd & 0x00ff;
   11076 
   11077 	return 0;
   11078 }
   11079 
   11080 static uint32_t
   11081 wm_sfp_get_media_type(struct wm_softc *sc)
   11082 {
   11083 	uint32_t ctrl_ext;
   11084 	uint8_t val = 0;
   11085 	int timeout = 3;
   11086 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11087 	int rv = -1;
   11088 
   11089 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11090 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11091 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11092 	CSR_WRITE_FLUSH(sc);
   11093 
   11094 	/* Read SFP module data */
   11095 	while (timeout) {
   11096 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11097 		if (rv == 0)
   11098 			break;
   11099 		delay(100*1000); /* XXX too big */
   11100 		timeout--;
   11101 	}
   11102 	if (rv != 0)
   11103 		goto out;
   11104 	switch (val) {
   11105 	case SFF_SFP_ID_SFF:
   11106 		aprint_normal_dev(sc->sc_dev,
   11107 		    "Module/Connector soldered to board\n");
   11108 		break;
   11109 	case SFF_SFP_ID_SFP:
   11110 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11111 		break;
   11112 	case SFF_SFP_ID_UNKNOWN:
   11113 		goto out;
   11114 	default:
   11115 		break;
   11116 	}
   11117 
   11118 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11119 	if (rv != 0) {
   11120 		goto out;
   11121 	}
   11122 
   11123 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11124 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11129 		sc->sc_flags |= WM_F_SGMII;
   11130 		mediatype = WM_MEDIATYPE_SERDES;
   11131 	}
   11132 
   11133 out:
   11134 	/* Restore I2C interface setting */
   11135 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11136 
   11137 	return mediatype;
   11138 }
   11139 
   11140 /*
 * Microwire, SPI (with or without EERD) and Flash.
   11142  * Microwire, SPI (w/wo EERD) and Flash.
   11143  */
   11144 
   11145 /* Both spi and uwire */
   11146 
   11147 /*
   11148  * wm_eeprom_sendbits:
   11149  *
   11150  *	Send a series of bits to the EEPROM.
   11151  */
   11152 static void
   11153 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11154 {
   11155 	uint32_t reg;
   11156 	int x;
   11157 
   11158 	reg = CSR_READ(sc, WMREG_EECD);
   11159 
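	/*
	 * Shift the bits out MSB first; each bit is presented on EECD_DI
	 * and latched by a low-high-low pulse on the EECD_SK clock.
	 */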
   11160 	for (x = nbits; x > 0; x--) {
   11161 		if (bits & (1U << (x - 1)))
   11162 			reg |= EECD_DI;
   11163 		else
   11164 			reg &= ~EECD_DI;
   11165 		CSR_WRITE(sc, WMREG_EECD, reg);
   11166 		CSR_WRITE_FLUSH(sc);
   11167 		delay(2);
   11168 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11169 		CSR_WRITE_FLUSH(sc);
   11170 		delay(2);
   11171 		CSR_WRITE(sc, WMREG_EECD, reg);
   11172 		CSR_WRITE_FLUSH(sc);
   11173 		delay(2);
   11174 	}
   11175 }
   11176 
   11177 /*
   11178  * wm_eeprom_recvbits:
   11179  *
   11180  *	Receive a series of bits from the EEPROM.
   11181  */
   11182 static void
   11183 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11184 {
   11185 	uint32_t reg, val;
   11186 	int x;
   11187 
   11188 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11189 
   11190 	val = 0;
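	/*
	 * Clock the bits in MSB first: for each bit, raise EECD_SK,
	 * sample EECD_DO while the clock is high, then lower the clock.
	 */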
   11191 	for (x = nbits; x > 0; x--) {
   11192 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11193 		CSR_WRITE_FLUSH(sc);
   11194 		delay(2);
   11195 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11196 			val |= (1U << (x - 1));
   11197 		CSR_WRITE(sc, WMREG_EECD, reg);
   11198 		CSR_WRITE_FLUSH(sc);
   11199 		delay(2);
   11200 	}
   11201 	*valp = val;
   11202 }
   11203 
   11204 /* Microwire */
   11205 
   11206 /*
   11207  * wm_nvm_read_uwire:
   11208  *
   11209  *	Read a word from the EEPROM using the MicroWire protocol.
   11210  */
   11211 static int
   11212 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11213 {
   11214 	uint32_t reg, val;
   11215 	int i;
   11216 
   11217 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11218 		device_xname(sc->sc_dev), __func__));
   11219 
   11220 	for (i = 0; i < wordcnt; i++) {
   11221 		/* Clear SK and DI. */
   11222 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11223 		CSR_WRITE(sc, WMREG_EECD, reg);
   11224 
   11225 		/*
   11226 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11227 		 * and Xen.
   11228 		 *
   11229 		 * We use this workaround only for 82540 because qemu's
   11230 		 * e1000 act as 82540.
   11231 		 */
   11232 		if (sc->sc_type == WM_T_82540) {
   11233 			reg |= EECD_SK;
   11234 			CSR_WRITE(sc, WMREG_EECD, reg);
   11235 			reg &= ~EECD_SK;
   11236 			CSR_WRITE(sc, WMREG_EECD, reg);
   11237 			CSR_WRITE_FLUSH(sc);
   11238 			delay(2);
   11239 		}
   11240 		/* XXX: end of workaround */
   11241 
   11242 		/* Set CHIP SELECT. */
   11243 		reg |= EECD_CS;
   11244 		CSR_WRITE(sc, WMREG_EECD, reg);
   11245 		CSR_WRITE_FLUSH(sc);
   11246 		delay(2);
   11247 
   11248 		/* Shift in the READ command. */
   11249 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11250 
   11251 		/* Shift in address. */
   11252 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11253 
   11254 		/* Shift out the data. */
   11255 		wm_eeprom_recvbits(sc, &val, 16);
   11256 		data[i] = val & 0xffff;
   11257 
   11258 		/* Clear CHIP SELECT. */
   11259 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11260 		CSR_WRITE(sc, WMREG_EECD, reg);
   11261 		CSR_WRITE_FLUSH(sc);
   11262 		delay(2);
   11263 	}
   11264 
   11265 	return 0;
   11266 }
   11267 
   11268 /* SPI */
   11269 
   11270 /*
   11271  * Set SPI and FLASH related information from the EECD register.
   11272  * For 82541 and 82547, the word size is taken from EEPROM.
   11273  */
   11274 static int
   11275 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11276 {
   11277 	int size;
   11278 	uint32_t reg;
   11279 	uint16_t data;
   11280 
   11281 	reg = CSR_READ(sc, WMREG_EECD);
   11282 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11283 
   11284 	/* Read the size of NVM from EECD by default */
   11285 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11286 	switch (sc->sc_type) {
   11287 	case WM_T_82541:
   11288 	case WM_T_82541_2:
   11289 	case WM_T_82547:
   11290 	case WM_T_82547_2:
   11291 		/* Set dummy value to access EEPROM */
   11292 		sc->sc_nvm_wordsize = 64;
   11293 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11294 		reg = data;
   11295 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11296 		if (size == 0)
   11297 			size = 6; /* 64 word size */
   11298 		else
   11299 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11300 		break;
   11301 	case WM_T_80003:
   11302 	case WM_T_82571:
   11303 	case WM_T_82572:
   11304 	case WM_T_82573: /* SPI case */
   11305 	case WM_T_82574: /* SPI case */
   11306 	case WM_T_82583: /* SPI case */
   11307 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11308 		if (size > 14)
   11309 			size = 14;
   11310 		break;
   11311 	case WM_T_82575:
   11312 	case WM_T_82576:
   11313 	case WM_T_82580:
   11314 	case WM_T_I350:
   11315 	case WM_T_I354:
   11316 	case WM_T_I210:
   11317 	case WM_T_I211:
   11318 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11319 		if (size > 15)
   11320 			size = 15;
   11321 		break;
   11322 	default:
   11323 		aprint_error_dev(sc->sc_dev,
   11324 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   11327 	}
   11328 
   11329 	sc->sc_nvm_wordsize = 1 << size;
   11330 
   11331 	return 0;
   11332 }
   11333 
   11334 /*
   11335  * wm_nvm_ready_spi:
   11336  *
   11337  *	Wait for a SPI EEPROM to be ready for commands.
   11338  */
   11339 static int
   11340 wm_nvm_ready_spi(struct wm_softc *sc)
   11341 {
   11342 	uint32_t val;
   11343 	int usec;
   11344 
   11345 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11346 		device_xname(sc->sc_dev), __func__));
   11347 
   11348 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11349 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11350 		wm_eeprom_recvbits(sc, &val, 8);
   11351 		if ((val & SPI_SR_RDY) == 0)
   11352 			break;
   11353 	}
   11354 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11356 		return 1;
   11357 	}
   11358 	return 0;
   11359 }
   11360 
   11361 /*
   11362  * wm_nvm_read_spi:
   11363  *
 *	Read a word from the EEPROM using the SPI protocol.
   11365  */
   11366 static int
   11367 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11368 {
   11369 	uint32_t reg, val;
   11370 	int i;
   11371 	uint8_t opc;
   11372 
   11373 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11374 		device_xname(sc->sc_dev), __func__));
   11375 
   11376 	/* Clear SK and CS. */
   11377 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11378 	CSR_WRITE(sc, WMREG_EECD, reg);
   11379 	CSR_WRITE_FLUSH(sc);
   11380 	delay(2);
   11381 
   11382 	if (wm_nvm_ready_spi(sc))
   11383 		return 1;
   11384 
   11385 	/* Toggle CS to flush commands. */
   11386 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11387 	CSR_WRITE_FLUSH(sc);
   11388 	delay(2);
   11389 	CSR_WRITE(sc, WMREG_EECD, reg);
   11390 	CSR_WRITE_FLUSH(sc);
   11391 	delay(2);
   11392 
   11393 	opc = SPI_OPC_READ;
   11394 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11395 		opc |= SPI_OPC_A8;
   11396 
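	/* SPI EEPROMs are byte addressed, so send the word address doubled. */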
   11397 	wm_eeprom_sendbits(sc, opc, 8);
   11398 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11399 
   11400 	for (i = 0; i < wordcnt; i++) {
   11401 		wm_eeprom_recvbits(sc, &val, 16);
   11402 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11403 	}
   11404 
   11405 	/* Raise CS and clear SK. */
   11406 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11407 	CSR_WRITE(sc, WMREG_EECD, reg);
   11408 	CSR_WRITE_FLUSH(sc);
   11409 	delay(2);
   11410 
   11411 	return 0;
   11412 }
   11413 
/* Reading with the EERD register */
   11415 
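/*
 * Poll the DONE bit in the register given by "rw" (the EERD or EEWR
 * register offset).  Returns 0 on completion, -1 on timeout (~500 ms).
 */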
   11416 static int
   11417 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11418 {
   11419 	uint32_t attempts = 100000;
   11420 	uint32_t i, reg = 0;
   11421 	int32_t done = -1;
   11422 
   11423 	for (i = 0; i < attempts; i++) {
   11424 		reg = CSR_READ(sc, rw);
   11425 
   11426 		if (reg & EERD_DONE) {
   11427 			done = 0;
   11428 			break;
   11429 		}
   11430 		delay(5);
   11431 	}
   11432 
   11433 	return done;
   11434 }
   11435 
   11436 static int
   11437 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11438     uint16_t *data)
   11439 {
   11440 	int i, eerd = 0;
   11441 	int error = 0;
   11442 
   11443 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11444 		device_xname(sc->sc_dev), __func__));
   11445 
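	/*
	 * For each word: write the word address plus the START bit to EERD,
	 * poll until DONE, then extract the result from the data field.
	 */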
   11446 	for (i = 0; i < wordcnt; i++) {
   11447 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11448 
   11449 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11450 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11451 		if (error != 0)
   11452 			break;
   11453 
   11454 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11455 	}
   11456 
   11457 	return error;
   11458 }
   11459 
   11460 /* Flash */
   11461 
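/******************************************************************************
 * Detect which NVM bank in flash holds valid data by checking each bank's
 * signature byte (or, on PCH_SPT, bits in the CTRL_EXT register).
 *
 * sc - The pointer to the hw structure
 * bank - Set to the valid bank (0 or 1) on success
 *****************************************************************************/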
   11462 static int
   11463 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11464 {
   11465 	uint32_t eecd;
   11466 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11467 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11468 	uint8_t sig_byte = 0;
   11469 
   11470 	switch (sc->sc_type) {
   11471 	case WM_T_PCH_SPT:
   11472 		/*
   11473 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11474 		 * sector valid bits from the NVM.
   11475 		 */
   11476 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11477 		if ((*bank == 0) || (*bank == 1)) {
   11478 			aprint_error_dev(sc->sc_dev,
   11479 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11480 				*bank);
   11481 			return -1;
   11482 		} else {
   11483 			*bank = *bank - 2;
   11484 			return 0;
   11485 		}
   11486 	case WM_T_ICH8:
   11487 	case WM_T_ICH9:
   11488 		eecd = CSR_READ(sc, WMREG_EECD);
   11489 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11490 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11491 			return 0;
   11492 		}
   11493 		/* FALLTHROUGH */
   11494 	default:
   11495 		/* Default to 0 */
   11496 		*bank = 0;
   11497 
   11498 		/* Check bank 0 */
   11499 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11500 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11501 			*bank = 0;
   11502 			return 0;
   11503 		}
   11504 
   11505 		/* Check bank 1 */
   11506 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11507 		    &sig_byte);
   11508 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11509 			*bank = 1;
   11510 			return 0;
   11511 		}
   11512 	}
   11513 
   11514 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11515 		device_xname(sc->sc_dev)));
   11516 	return -1;
   11517 }
   11518 
   11519 /******************************************************************************
   11520  * This function does initial flash setup so that a new read/write/erase cycle
   11521  * can be started.
   11522  *
   11523  * sc - The pointer to the hw structure
   11524  ****************************************************************************/
   11525 static int32_t
   11526 wm_ich8_cycle_init(struct wm_softc *sc)
   11527 {
   11528 	uint16_t hsfsts;
   11529 	int32_t error = 1;
   11530 	int32_t i     = 0;
   11531 
   11532 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11533 
	/* Check that the Flash Descriptor Valid bit is set in HW status */
   11535 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11536 		return error;
   11537 	}
   11538 
	/* Clear FCERR and DAEL in HW status by writing 1s (write-1-to-clear) */
   11541 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11542 
   11543 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11544 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be set by hardware after reset so that it can be used to
	 * tell whether a cycle is in progress or has completed.  We should
	 * also have a software semaphore guarding FDONE or the in-progress
	 * bit, so that accesses from two threads are serialized and they
	 * cannot start a cycle at the same time.
	 */
   11555 
   11556 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11557 		/*
   11558 		 * There is no cycle running at present, so we can start a
   11559 		 * cycle
   11560 		 */
   11561 
   11562 		/* Begin by setting Flash Cycle Done. */
   11563 		hsfsts |= HSFSTS_DONE;
   11564 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11565 		error = 0;
   11566 	} else {
		/*
		 * Otherwise, poll for a while so the current cycle has a
		 * chance to end before giving up.
		 */
   11571 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11572 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11573 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11574 				error = 0;
   11575 				break;
   11576 			}
   11577 			delay(1);
   11578 		}
   11579 		if (error == 0) {
			/*
			 * The previous cycle ended within the timeout;
			 * now set the Flash Cycle Done.
			 */
   11584 			hsfsts |= HSFSTS_DONE;
   11585 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11586 		}
   11587 	}
   11588 	return error;
   11589 }
   11590 
   11591 /******************************************************************************
   11592  * This function starts a flash cycle and waits for its completion
   11593  *
   11594  * sc - The pointer to the hw structure
   11595  ****************************************************************************/
   11596 static int32_t
   11597 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11598 {
   11599 	uint16_t hsflctl;
   11600 	uint16_t hsfsts;
   11601 	int32_t error = 1;
   11602 	uint32_t i = 0;
   11603 
   11604 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11605 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11606 	hsflctl |= HSFCTL_GO;
   11607 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11608 
   11609 	/* Wait till FDONE bit is set to 1 */
   11610 	do {
   11611 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11612 		if (hsfsts & HSFSTS_DONE)
   11613 			break;
   11614 		delay(1);
   11615 		i++;
   11616 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   11618 		error = 0;
   11619 
   11620 	return error;
   11621 }
   11622 
   11623 /******************************************************************************
   11624  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11625  *
   11626  * sc - The pointer to the hw structure
   11627  * index - The index of the byte or word to read.
   11628  * size - Size of data to read, 1=byte 2=word, 4=dword
   11629  * data - Pointer to the word to store the value read.
   11630  *****************************************************************************/
   11631 static int32_t
   11632 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11633     uint32_t size, uint32_t *data)
   11634 {
   11635 	uint16_t hsfsts;
   11636 	uint16_t hsflctl;
   11637 	uint32_t flash_linear_address;
   11638 	uint32_t flash_data = 0;
   11639 	int32_t error = 1;
   11640 	int32_t count = 0;
   11641 
	if (size < 1 || size > 4 || data == NULL ||
   11643 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11644 		return error;
   11645 
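	/* Address within flash: region base plus the masked byte index. */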
   11646 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11647 	    sc->sc_ich8_flash_base;
   11648 
   11649 	do {
   11650 		delay(1);
   11651 		/* Steps */
   11652 		error = wm_ich8_cycle_init(sc);
   11653 		if (error)
   11654 			break;
   11655 
   11656 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field encodes size - 1 (0 = 1 byte ... 3 = 4 bytes) */
   11658 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11659 		    & HSFCTL_BCOUNT_MASK;
   11660 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11661 		if (sc->sc_type == WM_T_PCH_SPT) {
			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32 bit access is supported.
			 */
   11666 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11667 			    (uint32_t)hsflctl);
   11668 		} else
   11669 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11670 
   11671 		/*
   11672 		 * Write the last 24 bits of index into Flash Linear address
   11673 		 * field in Flash Address
   11674 		 */
   11675 		/* TODO: TBD maybe check the index against the size of flash */
   11676 
   11677 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11678 
   11679 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11680 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * up to a few more times; otherwise read the result out of
		 * Flash Data0, least significant byte first.
		 */
   11687 		if (error == 0) {
   11688 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11689 			if (size == 1)
   11690 				*data = (uint8_t)(flash_data & 0x000000FF);
   11691 			else if (size == 2)
   11692 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11693 			else if (size == 4)
   11694 				*data = (uint32_t)flash_data;
   11695 			break;
   11696 		} else {
   11697 			/*
   11698 			 * If we've gotten here, then things are probably
   11699 			 * completely hosed, but if the error condition is
   11700 			 * detected, it won't hurt to give it another try...
   11701 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11702 			 */
   11703 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11704 			if (hsfsts & HSFSTS_ERR) {
   11705 				/* Repeat for some time before giving up. */
   11706 				continue;
   11707 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11708 				break;
   11709 		}
   11710 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11711 
   11712 	return error;
   11713 }
   11714 
   11715 /******************************************************************************
   11716  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11717  *
   11718  * sc - pointer to wm_hw structure
   11719  * index - The index of the byte to read.
   11720  * data - Pointer to a byte to store the value read.
   11721  *****************************************************************************/
   11722 static int32_t
   11723 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11724 {
   11725 	int32_t status;
   11726 	uint32_t word = 0;
   11727 
   11728 	status = wm_read_ich8_data(sc, index, 1, &word);
   11729 	if (status == 0)
   11730 		*data = (uint8_t)word;
   11731 	else
   11732 		*data = 0;
   11733 
   11734 	return status;
   11735 }
   11736 
   11737 /******************************************************************************
   11738  * Reads a word from the NVM using the ICH8 flash access registers.
   11739  *
   11740  * sc - pointer to wm_hw structure
   11741  * index - The starting byte index of the word to read.
   11742  * data - Pointer to a word to store the value read.
   11743  *****************************************************************************/
   11744 static int32_t
   11745 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11746 {
   11747 	int32_t status;
   11748 	uint32_t word = 0;
   11749 
   11750 	status = wm_read_ich8_data(sc, index, 2, &word);
   11751 	if (status == 0)
   11752 		*data = (uint16_t)word;
   11753 	else
   11754 		*data = 0;
   11755 
   11756 	return status;
   11757 }
   11758 
   11759 /******************************************************************************
   11760  * Reads a dword from the NVM using the ICH8 flash access registers.
   11761  *
   11762  * sc - pointer to wm_hw structure
   11763  * index - The starting byte index of the word to read.
   11764  * data - Pointer to a word to store the value read.
   11765  *****************************************************************************/
   11766 static int32_t
   11767 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11768 {
   11769 	int32_t status;
   11770 
   11771 	status = wm_read_ich8_data(sc, index, 4, data);
   11772 	return status;
   11773 }
   11774 
   11775 /******************************************************************************
   11776  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11777  * register.
   11778  *
   11779  * sc - Struct containing variables accessed by shared code
   11780  * offset - offset of word in the EEPROM to read
   11781  * data - word read from the EEPROM
   11782  * words - number of words to read
   11783  *****************************************************************************/
   11784 static int
   11785 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11786 {
   11787 	int32_t  error = 0;
   11788 	uint32_t flash_bank = 0;
   11789 	uint32_t act_offset = 0;
   11790 	uint32_t bank_offset = 0;
   11791 	uint16_t word = 0;
   11792 	uint16_t i = 0;
   11793 
   11794 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11795 		device_xname(sc->sc_dev), __func__));
   11796 
   11797 	/*
   11798 	 * We need to know which is the valid flash bank.  In the event
   11799 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11800 	 * managing flash_bank.  So it cannot be trusted and needs
   11801 	 * to be updated with each read.
   11802 	 */
   11803 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11804 	if (error) {
   11805 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11806 			device_xname(sc->sc_dev)));
   11807 		flash_bank = 0;
   11808 	}
   11809 
   11810 	/*
   11811 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11812 	 * size
   11813 	 */
   11814 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
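	/* For example, a 2048-word bank puts bank 1 at byte offset 4096. */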
   11815 
   11816 	error = wm_get_swfwhw_semaphore(sc);
   11817 	if (error) {
   11818 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11819 		    __func__);
   11820 		return error;
   11821 	}
   11822 
   11823 	for (i = 0; i < words; i++) {
   11824 		/* The NVM part needs a byte offset, hence * 2 */
   11825 		act_offset = bank_offset + ((offset + i) * 2);
   11826 		error = wm_read_ich8_word(sc, act_offset, &word);
   11827 		if (error) {
   11828 			aprint_error_dev(sc->sc_dev,
   11829 			    "%s: failed to read NVM\n", __func__);
   11830 			break;
   11831 		}
   11832 		data[i] = word;
   11833 	}
   11834 
   11835 	wm_put_swfwhw_semaphore(sc);
   11836 	return error;
   11837 }
   11838 
   11839 /******************************************************************************
   11840  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11841  * register.
   11842  *
   11843  * sc - Struct containing variables accessed by shared code
   11844  * offset - offset of word in the EEPROM to read
   11845  * data - word read from the EEPROM
   11846  * words - number of words to read
   11847  *****************************************************************************/
   11848 static int
   11849 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11850 {
   11851 	int32_t  error = 0;
   11852 	uint32_t flash_bank = 0;
   11853 	uint32_t act_offset = 0;
   11854 	uint32_t bank_offset = 0;
   11855 	uint32_t dword = 0;
   11856 	uint16_t i = 0;
   11857 
   11858 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11859 		device_xname(sc->sc_dev), __func__));
   11860 
   11861 	/*
   11862 	 * We need to know which is the valid flash bank.  In the event
   11863 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11864 	 * managing flash_bank.  So it cannot be trusted and needs
   11865 	 * to be updated with each read.
   11866 	 */
   11867 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11868 	if (error) {
   11869 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11870 			device_xname(sc->sc_dev)));
   11871 		flash_bank = 0;
   11872 	}
   11873 
   11874 	/*
   11875 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11876 	 * size
   11877 	 */
   11878 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11879 
   11880 	error = wm_get_swfwhw_semaphore(sc);
   11881 	if (error) {
   11882 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11883 		    __func__);
   11884 		return error;
   11885 	}
   11886 
   11887 	for (i = 0; i < words; i++) {
   11888 		/* The NVM part needs a byte offset, hence * 2 */
   11889 		act_offset = bank_offset + ((offset + i) * 2);
   11890 		/* but we must read dword aligned, so mask ... */
   11891 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11892 		if (error) {
   11893 			aprint_error_dev(sc->sc_dev,
   11894 			    "%s: failed to read NVM\n", __func__);
   11895 			break;
   11896 		}
   11897 		/* ... and pick out low or high word */
   11898 		if ((act_offset & 0x2) == 0)
   11899 			data[i] = (uint16_t)(dword & 0xFFFF);
   11900 		else
   11901 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11902 	}
   11903 
   11904 	wm_put_swfwhw_semaphore(sc);
   11905 	return error;
   11906 }
   11907 
   11908 /* iNVM */
   11909 
   11910 static int
   11911 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11912 {
	int32_t rv = -1;	/* not found until proven otherwise */
   11914 	uint32_t invm_dword;
   11915 	uint16_t i;
   11916 	uint8_t record_type, word_address;
   11917 
   11918 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11919 		device_xname(sc->sc_dev), __func__));
   11920 
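	/*
	 * iNVM is a flat array of dwords.  Walk the records in order:
	 * CSR and RSA autoload structures are skipped by advancing the
	 * index, and each word autoload record carries one data word at
	 * an embedded word address.
	 */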
   11921 	for (i = 0; i < INVM_SIZE; i++) {
   11922 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11923 		/* Get record type */
   11924 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11925 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11926 			break;
   11927 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11928 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11929 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11930 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11931 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11932 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11933 			if (word_address == address) {
   11934 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11935 				rv = 0;
   11936 				break;
   11937 			}
   11938 		}
   11939 	}
   11940 
   11941 	return rv;
   11942 }
   11943 
   11944 static int
   11945 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11946 {
   11947 	int rv = 0;
   11948 	int i;
   11949 
   11950 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11951 		device_xname(sc->sc_dev), __func__));
   11952 
   11953 	for (i = 0; i < words; i++) {
   11954 		switch (offset + i) {
   11955 		case NVM_OFF_MACADDR:
   11956 		case NVM_OFF_MACADDR1:
   11957 		case NVM_OFF_MACADDR2:
   11958 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11959 			if (rv != 0) {
   11960 				data[i] = 0xffff;
   11961 				rv = -1;
   11962 			}
   11963 			break;
   11964 		case NVM_OFF_CFG2:
   11965 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11966 			if (rv != 0) {
   11967 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11968 				rv = 0;
   11969 			}
   11970 			break;
   11971 		case NVM_OFF_CFG4:
   11972 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11973 			if (rv != 0) {
   11974 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11975 				rv = 0;
   11976 			}
   11977 			break;
   11978 		case NVM_OFF_LED_1_CFG:
   11979 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11980 			if (rv != 0) {
   11981 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11982 				rv = 0;
   11983 			}
   11984 			break;
   11985 		case NVM_OFF_LED_0_2_CFG:
   11986 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11987 			if (rv != 0) {
   11988 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11989 				rv = 0;
   11990 			}
   11991 			break;
   11992 		case NVM_OFF_ID_LED_SETTINGS:
   11993 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11994 			if (rv != 0) {
   11995 				*data = ID_LED_RESERVED_FFFF;
   11996 				rv = 0;
   11997 			}
   11998 			break;
   11999 		default:
   12000 			DPRINTF(WM_DEBUG_NVM,
   12001 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12002 			*data = NVM_RESERVED_WORD;
   12003 			break;
   12004 		}
   12005 	}
   12006 
   12007 	return rv;
   12008 }
   12009 
/* Locking, NVM type detection, checksum validation, version and read */
   12011 
   12012 /*
   12013  * wm_nvm_acquire:
   12014  *
   12015  *	Perform the EEPROM handshake required on some chips.
   12016  */
   12017 static int
   12018 wm_nvm_acquire(struct wm_softc *sc)
   12019 {
   12020 	uint32_t reg;
   12021 	int x;
   12022 	int ret = 0;
   12023 
   12024 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12025 		device_xname(sc->sc_dev), __func__));
   12026 
   12027 	if (sc->sc_type >= WM_T_ICH8) {
   12028 		ret = wm_get_nvm_ich8lan(sc);
   12029 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   12030 		ret = wm_get_swfwhw_semaphore(sc);
   12031 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   12032 		/* This will also do wm_get_swsm_semaphore() if needed */
   12033 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   12034 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12035 		ret = wm_get_swsm_semaphore(sc);
   12036 	}
   12037 
   12038 	if (ret) {
   12039 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   12040 			__func__);
   12041 		return 1;
   12042 	}
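	/*
	 * If one of the mechanisms above was taken, it is held until
	 * wm_nvm_release() drops it.
	 */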
   12043 
   12044 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12045 		reg = CSR_READ(sc, WMREG_EECD);
   12046 
   12047 		/* Request EEPROM access. */
   12048 		reg |= EECD_EE_REQ;
   12049 		CSR_WRITE(sc, WMREG_EECD, reg);
   12050 
   12051 		/* ..and wait for it to be granted. */
   12052 		for (x = 0; x < 1000; x++) {
   12053 			reg = CSR_READ(sc, WMREG_EECD);
   12054 			if (reg & EECD_EE_GNT)
   12055 				break;
   12056 			delay(5);
   12057 		}
   12058 		if ((reg & EECD_EE_GNT) == 0) {
   12059 			aprint_error_dev(sc->sc_dev,
   12060 			    "could not acquire EEPROM GNT\n");
   12061 			reg &= ~EECD_EE_REQ;
   12062 			CSR_WRITE(sc, WMREG_EECD, reg);
   12063 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12064 				wm_put_swfwhw_semaphore(sc);
   12065 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12066 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12067 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12068 				wm_put_swsm_semaphore(sc);
   12069 			return 1;
   12070 		}
   12071 	}
   12072 
   12073 	return 0;
   12074 }
   12075 
   12076 /*
   12077  * wm_nvm_release:
   12078  *
   12079  *	Release the EEPROM mutex.
   12080  */
   12081 static void
   12082 wm_nvm_release(struct wm_softc *sc)
   12083 {
   12084 	uint32_t reg;
   12085 
   12086 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12087 		device_xname(sc->sc_dev), __func__));
   12088 
   12089 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12090 		reg = CSR_READ(sc, WMREG_EECD);
   12091 		reg &= ~EECD_EE_REQ;
   12092 		CSR_WRITE(sc, WMREG_EECD, reg);
   12093 	}
   12094 
   12095 	if (sc->sc_type >= WM_T_ICH8) {
   12096 		wm_put_nvm_ich8lan(sc);
   12097 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12098 		wm_put_swfwhw_semaphore(sc);
   12099 	else if (sc->sc_flags & WM_F_LOCK_SWFW)
   12100 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12101 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12102 		wm_put_swsm_semaphore(sc);
   12103 }
   12104 
   12105 static int
   12106 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12107 {
   12108 	uint32_t eecd = 0;
   12109 
   12110 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12111 	    || sc->sc_type == WM_T_82583) {
   12112 		eecd = CSR_READ(sc, WMREG_EECD);
   12113 
   12114 		/* Isolate bits 15 & 16 */
   12115 		eecd = ((eecd >> 15) & 0x03);
   12116 
   12117 		/* If both bits are set, device is Flash type */
   12118 		if (eecd == 0x03)
   12119 			return 0;
   12120 	}
   12121 	return 1;
   12122 }
   12123 
   12124 static int
   12125 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12126 {
   12127 	uint32_t eec;
   12128 
   12129 	eec = CSR_READ(sc, WMREG_EEC);
   12130 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12131 		return 1;
   12132 
   12133 	return 0;
   12134 }
   12135 
   12136 /*
   12137  * wm_nvm_validate_checksum
   12138  *
 * The checksum is defined so that the sum of the first 64 16-bit words
 * equals NVM_CHECKSUM (0xBABA).
   12140  */
   12141 static int
   12142 wm_nvm_validate_checksum(struct wm_softc *sc)
   12143 {
   12144 	uint16_t checksum;
   12145 	uint16_t eeprom_data;
   12146 #ifdef WM_DEBUG
   12147 	uint16_t csum_wordaddr, valid_checksum;
   12148 #endif
   12149 	int i;
   12150 
   12151 	checksum = 0;
   12152 
   12153 	/* Don't check for I211 */
   12154 	if (sc->sc_type == WM_T_I211)
   12155 		return 0;
   12156 
   12157 #ifdef WM_DEBUG
   12158 	if (sc->sc_type == WM_T_PCH_LPT) {
   12159 		csum_wordaddr = NVM_OFF_COMPAT;
   12160 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12161 	} else {
   12162 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12163 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12164 	}
   12165 
   12166 	/* Dump EEPROM image for debug */
   12167 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12168 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12169 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12170 		/* XXX PCH_SPT? */
   12171 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12172 		if ((eeprom_data & valid_checksum) == 0) {
   12173 			DPRINTF(WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12175 				device_xname(sc->sc_dev), eeprom_data,
   12176 				    valid_checksum));
   12177 		}
   12178 	}
   12179 
   12180 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12181 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12182 		for (i = 0; i < NVM_SIZE; i++) {
   12183 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12184 				printf("XXXX ");
   12185 			else
   12186 				printf("%04hx ", eeprom_data);
   12187 			if (i % 8 == 7)
   12188 				printf("\n");
   12189 		}
   12190 	}
   12191 
   12192 #endif /* WM_DEBUG */
   12193 
   12194 	for (i = 0; i < NVM_SIZE; i++) {
   12195 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12196 			return 1;
   12197 		checksum += eeprom_data;
   12198 	}
   12199 
   12200 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12201 #ifdef WM_DEBUG
   12202 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12203 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12204 #endif
   12205 	}
   12206 
   12207 	return 0;
   12208 }
   12209 
   12210 static void
   12211 wm_nvm_version_invm(struct wm_softc *sc)
   12212 {
   12213 	uint32_t dword;
   12214 
   12215 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm; we just use word 61 as the documentation
	 * describes.  Perhaps it's not perfect, though...
   12219 	 *
   12220 	 * Example:
   12221 	 *
   12222 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12223 	 */
   12224 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12225 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12226 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12227 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12228 }
   12229 
   12230 static void
   12231 wm_nvm_version(struct wm_softc *sc)
   12232 {
   12233 	uint16_t major, minor, build, patch;
   12234 	uint16_t uid0, uid1;
   12235 	uint16_t nvm_data;
   12236 	uint16_t off;
   12237 	bool check_version = false;
   12238 	bool check_optionrom = false;
   12239 	bool have_build = false;
   12240 	bool have_uid = true;
   12241 
   12242 	/*
   12243 	 * Version format:
   12244 	 *
   12245 	 * XYYZ
   12246 	 * X0YZ
   12247 	 * X0YY
   12248 	 *
   12249 	 * Example:
   12250 	 *
   12251 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12252 	 *	82571	0x50a6	5.10.6?
   12253 	 *	82572	0x506a	5.6.10?
   12254 	 *	82572EI	0x5069	5.6.9?
   12255 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12256 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12258 	 */
   12259 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12260 	switch (sc->sc_type) {
   12261 	case WM_T_82571:
   12262 	case WM_T_82572:
   12263 	case WM_T_82574:
   12264 	case WM_T_82583:
   12265 		check_version = true;
   12266 		check_optionrom = true;
   12267 		have_build = true;
   12268 		break;
   12269 	case WM_T_82575:
   12270 	case WM_T_82576:
   12271 	case WM_T_82580:
   12272 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12273 			check_version = true;
   12274 		break;
   12275 	case WM_T_I211:
   12276 		wm_nvm_version_invm(sc);
   12277 		have_uid = false;
   12278 		goto printver;
   12279 	case WM_T_I210:
   12280 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12281 			wm_nvm_version_invm(sc);
   12282 			have_uid = false;
   12283 			goto printver;
   12284 		}
   12285 		/* FALLTHROUGH */
   12286 	case WM_T_I350:
   12287 	case WM_T_I354:
   12288 		check_version = true;
   12289 		check_optionrom = true;
   12290 		break;
   12291 	default:
   12292 		return;
   12293 	}
   12294 	if (check_version) {
   12295 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12296 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12297 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12298 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12299 			build = nvm_data & NVM_BUILD_MASK;
   12300 			have_build = true;
   12301 		} else
   12302 			minor = nvm_data & 0x00ff;
   12303 
   12304 		/* Decimal */
   12305 		minor = (minor / 16) * 10 + (minor % 16);
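		/* e.g. 0x16 decodes to 16: the minor number is stored as BCD */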
   12306 		sc->sc_nvm_ver_major = major;
   12307 		sc->sc_nvm_ver_minor = minor;
   12308 
   12309 printver:
   12310 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12311 		    sc->sc_nvm_ver_minor);
   12312 		if (have_build) {
   12313 			sc->sc_nvm_ver_build = build;
   12314 			aprint_verbose(".%d", build);
   12315 		}
   12316 	}
   12317 	if (check_optionrom) {
   12318 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12319 		/* Option ROM Version */
   12320 		if ((off != 0x0000) && (off != 0xffff)) {
   12321 			off += NVM_COMBO_VER_OFF;
   12322 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12323 			wm_nvm_read(sc, off, 1, &uid0);
   12324 			if ((uid0 != 0) && (uid0 != 0xffff)
   12325 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12326 				/* 16bits */
   12327 				major = uid0 >> 8;
   12328 				build = (uid0 << 8) | (uid1 >> 8);
   12329 				patch = uid1 & 0x00ff;
   12330 				aprint_verbose(", option ROM Version %d.%d.%d",
   12331 				    major, build, patch);
   12332 			}
   12333 		}
   12334 	}
   12335 
   12336 	if (have_uid) {
   12337 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12338 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12339 	}
   12340 }
   12341 
   12342 /*
   12343  * wm_nvm_read:
   12344  *
   12345  *	Read data from the serial EEPROM.
   12346  */
   12347 static int
   12348 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12349 {
   12350 	int rv;
   12351 
   12352 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12353 		device_xname(sc->sc_dev), __func__));
   12354 
   12355 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12356 		return 1;
   12357 
   12358 	if (wm_nvm_acquire(sc))
   12359 		return 1;
   12360 
   12361 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12362 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12363 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12364 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12365 	else if (sc->sc_type == WM_T_PCH_SPT)
   12366 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12367 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12368 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12369 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12370 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12371 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12372 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12373 	else
   12374 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12375 
   12376 	wm_nvm_release(sc);
   12377 	return rv;
   12378 }
   12379 
   12380 /*
   12381  * Hardware semaphores.
 * Very complex...
   12383  */
   12384 
   12385 static int
   12386 wm_get_null(struct wm_softc *sc)
   12387 {
   12388 
   12389 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12390 		device_xname(sc->sc_dev), __func__));
   12391 	return 0;
   12392 }
   12393 
   12394 static void
   12395 wm_put_null(struct wm_softc *sc)
   12396 {
   12397 
   12398 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12399 		device_xname(sc->sc_dev), __func__));
   12400 	return;
   12401 }
   12402 
   12403 /*
   12404  * Get hardware semaphore.
   12405  * Same as e1000_get_hw_semaphore_generic()
   12406  */
   12407 static int
   12408 wm_get_swsm_semaphore(struct wm_softc *sc)
   12409 {
   12410 	int32_t timeout;
   12411 	uint32_t swsm;
   12412 
   12413 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12414 		device_xname(sc->sc_dev), __func__));
   12415 	KASSERT(sc->sc_nvm_wordsize > 0);
   12416 
   12417 	/* Get the SW semaphore. */
   12418 	timeout = sc->sc_nvm_wordsize + 1;
   12419 	while (timeout) {
   12420 		swsm = CSR_READ(sc, WMREG_SWSM);
   12421 
   12422 		if ((swsm & SWSM_SMBI) == 0)
   12423 			break;
   12424 
   12425 		delay(50);
   12426 		timeout--;
   12427 	}
   12428 
   12429 	if (timeout == 0) {
   12430 		aprint_error_dev(sc->sc_dev,
   12431 		    "could not acquire SWSM SMBI\n");
   12432 		return 1;
   12433 	}
   12434 
   12435 	/* Get the FW semaphore. */
   12436 	timeout = sc->sc_nvm_wordsize + 1;
   12437 	while (timeout) {
   12438 		swsm = CSR_READ(sc, WMREG_SWSM);
   12439 		swsm |= SWSM_SWESMBI;
   12440 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12441 		/* If we managed to set the bit we got the semaphore. */
   12442 		swsm = CSR_READ(sc, WMREG_SWSM);
   12443 		if (swsm & SWSM_SWESMBI)
   12444 			break;
   12445 
   12446 		delay(50);
   12447 		timeout--;
   12448 	}
   12449 
   12450 	if (timeout == 0) {
   12451 		aprint_error_dev(sc->sc_dev,
   12452 		    "could not acquire SWSM SWESMBI\n");
   12453 		/* Release semaphores */
   12454 		wm_put_swsm_semaphore(sc);
   12455 		return 1;
   12456 	}
   12457 	return 0;
   12458 }
   12459 
   12460 /*
   12461  * Put hardware semaphore.
   12462  * Same as e1000_put_hw_semaphore_generic()
   12463  */
   12464 static void
   12465 wm_put_swsm_semaphore(struct wm_softc *sc)
   12466 {
   12467 	uint32_t swsm;
   12468 
   12469 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12470 		device_xname(sc->sc_dev), __func__));
   12471 
   12472 	swsm = CSR_READ(sc, WMREG_SWSM);
   12473 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12474 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12475 }
   12476 
   12477 /*
   12478  * Get SW/FW semaphore.
   12479  * Same as e1000_acquire_swfw_sync_82575().
   12480  */
   12481 static int
   12482 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12483 {
   12484 	uint32_t swfw_sync;
   12485 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12486 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12487 	int timeout = 200;
   12488 
   12489 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12490 		device_xname(sc->sc_dev), __func__));
   12491 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12492 
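	/*
	 * SW_FW_SYNC holds a software bit and a firmware bit per resource;
	 * the resource is free only when both are clear.
	 */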
   12493 	for (timeout = 0; timeout < 200; timeout++) {
   12494 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12495 			if (wm_get_swsm_semaphore(sc)) {
   12496 				aprint_error_dev(sc->sc_dev,
   12497 				    "%s: failed to get semaphore\n",
   12498 				    __func__);
   12499 				return 1;
   12500 			}
   12501 		}
   12502 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12503 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12504 			swfw_sync |= swmask;
   12505 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12506 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12507 				wm_put_swsm_semaphore(sc);
   12508 			return 0;
   12509 		}
   12510 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12511 			wm_put_swsm_semaphore(sc);
   12512 		delay(5000);
   12513 	}
   12514 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12515 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12516 	return 1;
   12517 }
   12518 
   12519 static void
   12520 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12521 {
   12522 	uint32_t swfw_sync;
   12523 
   12524 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12525 		device_xname(sc->sc_dev), __func__));
   12526 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12527 
   12528 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12529 		while (wm_get_swsm_semaphore(sc) != 0)
   12530 			continue;
   12531 	}
   12532 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12533 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12534 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12535 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12536 		wm_put_swsm_semaphore(sc);
   12537 }
   12538 
   12539 static int
   12540 wm_get_phy_82575(struct wm_softc *sc)
   12541 {
   12542 
   12543 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12544 		device_xname(sc->sc_dev), __func__));
   12545 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12546 }
   12547 
   12548 static void
   12549 wm_put_phy_82575(struct wm_softc *sc)
   12550 {
   12551 
   12552 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12553 		device_xname(sc->sc_dev), __func__));
   12554 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12555 }
   12556 
   12557 static int
   12558 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12559 {
   12560 	uint32_t ext_ctrl;
   12561 	int timeout = 200;
   12562 
   12563 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12564 		device_xname(sc->sc_dev), __func__));
   12565 
   12566 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12567 	for (timeout = 0; timeout < 200; timeout++) {
   12568 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12569 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12570 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12571 
   12572 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12573 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12574 			return 0;
   12575 		delay(5000);
   12576 	}
   12577 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12578 	    device_xname(sc->sc_dev), ext_ctrl);
   12579 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12580 	return 1;
   12581 }
   12582 
   12583 static void
   12584 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12585 {
   12586 	uint32_t ext_ctrl;
   12587 
   12588 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12589 		device_xname(sc->sc_dev), __func__));
   12590 
   12591 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12592 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12593 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12594 
   12595 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12596 }
   12597 
   12598 static int
   12599 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12600 {
   12601 	uint32_t ext_ctrl;
   12602 	int timeout;
   12603 
   12604 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12605 		device_xname(sc->sc_dev), __func__));
   12606 	mutex_enter(sc->sc_ich_phymtx);
   12607 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12608 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12609 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12610 			break;
   12611 		delay(1000);
   12612 	}
   12613 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12614 		printf("%s: SW has already locked the resource\n",
   12615 		    device_xname(sc->sc_dev));
   12616 		goto out;
   12617 	}
   12618 
   12619 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12620 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12621 	for (timeout = 0; timeout < 1000; timeout++) {
   12622 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12623 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12624 			break;
   12625 		delay(1000);
   12626 	}
   12627 	if (timeout >= 1000) {
   12628 		printf("%s: failed to acquire semaphore\n",
   12629 		    device_xname(sc->sc_dev));
   12630 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12631 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12632 		goto out;
   12633 	}
   12634 	return 0;
   12635 
   12636 out:
   12637 	mutex_exit(sc->sc_ich_phymtx);
   12638 	return 1;
   12639 }
   12640 
   12641 static void
   12642 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12643 {
   12644 	uint32_t ext_ctrl;
   12645 
   12646 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12647 		device_xname(sc->sc_dev), __func__));
   12648 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12649 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12650 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12651 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12652 	} else {
   12653 		printf("%s: Semaphore unexpectedly released\n",
   12654 		    device_xname(sc->sc_dev));
   12655 	}
   12656 
   12657 	mutex_exit(sc->sc_ich_phymtx);
   12658 }
   12659 
   12660 static int
   12661 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12662 {
   12663 
   12664 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12665 		device_xname(sc->sc_dev), __func__));
   12666 	mutex_enter(sc->sc_ich_nvmmtx);
   12667 
   12668 	return 0;
   12669 }
   12670 
   12671 static void
   12672 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12673 {
   12674 
   12675 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12676 		device_xname(sc->sc_dev), __func__));
   12677 	mutex_exit(sc->sc_ich_nvmmtx);
   12678 }
   12679 
   12680 static int
   12681 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12682 {
   12683 	int i = 0;
   12684 	uint32_t reg;
   12685 
   12686 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12687 		device_xname(sc->sc_dev), __func__));
   12688 
   12689 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12690 	do {
   12691 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12692 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12693 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12694 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12695 			break;
   12696 		delay(2*1000);
   12697 		i++;
   12698 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12699 
   12700 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12701 		wm_put_hw_semaphore_82573(sc);
   12702 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12703 		    device_xname(sc->sc_dev));
   12704 		return -1;
   12705 	}
   12706 
   12707 	return 0;
   12708 }
   12709 
   12710 static void
   12711 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12712 {
   12713 	uint32_t reg;
   12714 
   12715 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12716 		device_xname(sc->sc_dev), __func__));
   12717 
   12718 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12719 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12720 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12721 }
   12722 
   12723 /*
   12724  * Management mode and power management related subroutines.
   12725  * BMC, AMT, suspend/resume and EEE.
   12726  */
   12727 
   12728 #ifdef WM_WOL
   12729 static int
   12730 wm_check_mng_mode(struct wm_softc *sc)
   12731 {
   12732 	int rv;
   12733 
   12734 	switch (sc->sc_type) {
   12735 	case WM_T_ICH8:
   12736 	case WM_T_ICH9:
   12737 	case WM_T_ICH10:
   12738 	case WM_T_PCH:
   12739 	case WM_T_PCH2:
   12740 	case WM_T_PCH_LPT:
   12741 	case WM_T_PCH_SPT:
   12742 		rv = wm_check_mng_mode_ich8lan(sc);
   12743 		break;
   12744 	case WM_T_82574:
   12745 	case WM_T_82583:
   12746 		rv = wm_check_mng_mode_82574(sc);
   12747 		break;
   12748 	case WM_T_82571:
   12749 	case WM_T_82572:
   12750 	case WM_T_82573:
   12751 	case WM_T_80003:
   12752 		rv = wm_check_mng_mode_generic(sc);
   12753 		break;
   12754 	default:
		/* nothing to do */
   12756 		rv = 0;
   12757 		break;
   12758 	}
   12759 
   12760 	return rv;
   12761 }
   12762 
   12763 static int
   12764 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12765 {
   12766 	uint32_t fwsm;
   12767 
   12768 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12769 
   12770 	if (((fwsm & FWSM_FW_VALID) != 0)
   12771 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12772 		return 1;
   12773 
   12774 	return 0;
   12775 }
   12776 
   12777 static int
   12778 wm_check_mng_mode_82574(struct wm_softc *sc)
   12779 {
   12780 	uint16_t data;
   12781 
   12782 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12783 
   12784 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12785 		return 1;
   12786 
   12787 	return 0;
   12788 }
   12789 
   12790 static int
   12791 wm_check_mng_mode_generic(struct wm_softc *sc)
   12792 {
   12793 	uint32_t fwsm;
   12794 
   12795 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12796 
   12797 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12798 		return 1;
   12799 
   12800 	return 0;
   12801 }
   12802 #endif /* WM_WOL */
   12803 
   12804 static int
   12805 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12806 {
   12807 	uint32_t manc, fwsm, factps;
   12808 
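	/*
	 * Management pass-through requires firmware that intercepts TCO
	 * traffic (MANC_RECV_TCO_EN) and runs in a pass-through capable
	 * mode without manageability clock gating.
	 */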
   12809 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12810 		return 0;
   12811 
   12812 	manc = CSR_READ(sc, WMREG_MANC);
   12813 
   12814 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12815 		device_xname(sc->sc_dev), manc));
   12816 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12817 		return 0;
   12818 
   12819 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12820 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12821 		factps = CSR_READ(sc, WMREG_FACTPS);
   12822 		if (((factps & FACTPS_MNGCG) == 0)
   12823 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12824 			return 1;
   12825 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12826 		uint16_t data;
   12827 
   12828 		factps = CSR_READ(sc, WMREG_FACTPS);
   12829 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12830 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12831 			device_xname(sc->sc_dev), factps, data));
   12832 		if (((factps & FACTPS_MNGCG) == 0)
   12833 		    && ((data & NVM_CFG2_MNGM_MASK)
   12834 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12835 			return 1;
   12836 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12837 	    && ((manc & MANC_ASF_EN) == 0))
   12838 		return 1;
   12839 
   12840 	return 0;
   12841 }
   12842 
   12843 static bool
   12844 wm_phy_resetisblocked(struct wm_softc *sc)
   12845 {
   12846 	bool blocked = false;
   12847 	uint32_t reg;
   12848 	int i = 0;
   12849 
   12850 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12851 		device_xname(sc->sc_dev), __func__));
   12852 
   12853 	switch (sc->sc_type) {
   12854 	case WM_T_ICH8:
   12855 	case WM_T_ICH9:
   12856 	case WM_T_ICH10:
   12857 	case WM_T_PCH:
   12858 	case WM_T_PCH2:
   12859 	case WM_T_PCH_LPT:
   12860 	case WM_T_PCH_SPT:
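		/*
		 * Poll FWSM up to 30 times (about 300 ms) while the
		 * firmware blocks the PHY reset.
		 */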
   12861 		do {
   12862 			reg = CSR_READ(sc, WMREG_FWSM);
   12863 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12864 				blocked = true;
   12865 				delay(10*1000);
   12866 				continue;
   12867 			}
   12868 			blocked = false;
   12869 		} while (blocked && (i++ < 30));
		return blocked;
   12872 	case WM_T_82571:
   12873 	case WM_T_82572:
   12874 	case WM_T_82573:
   12875 	case WM_T_82574:
   12876 	case WM_T_82583:
   12877 	case WM_T_80003:
   12878 		reg = CSR_READ(sc, WMREG_MANC);
		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   12884 	default:
   12885 		/* no problem */
   12886 		break;
   12887 	}
   12888 
   12889 	return false;
   12890 }
   12891 
   12892 static void
   12893 wm_get_hw_control(struct wm_softc *sc)
   12894 {
   12895 	uint32_t reg;
   12896 
   12897 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12898 		device_xname(sc->sc_dev), __func__));
   12899 
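	/*
	 * Setting DRV_LOAD (in SWSM or CTRL_EXT, depending on the chip)
	 * tells the firmware that the driver has taken over the hardware;
	 * wm_release_hw_control() clears it again.
	 */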
   12900 	if (sc->sc_type == WM_T_82573) {
   12901 		reg = CSR_READ(sc, WMREG_SWSM);
   12902 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12903 	} else if (sc->sc_type >= WM_T_82571) {
   12904 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12905 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12906 	}
   12907 }
   12908 
   12909 static void
   12910 wm_release_hw_control(struct wm_softc *sc)
   12911 {
   12912 	uint32_t reg;
   12913 
   12914 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12915 		device_xname(sc->sc_dev), __func__));
   12916 
   12917 	if (sc->sc_type == WM_T_82573) {
   12918 		reg = CSR_READ(sc, WMREG_SWSM);
   12919 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12920 	} else if (sc->sc_type >= WM_T_82571) {
   12921 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12922 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12923 	}
   12924 }
   12925 
   12926 static void
   12927 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12928 {
   12929 	uint32_t reg;
   12930 
   12931 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12932 		device_xname(sc->sc_dev), __func__));
   12933 
   12934 	if (sc->sc_type < WM_T_PCH2)
   12935 		return;
   12936 
   12937 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12938 
   12939 	if (gate)
   12940 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12941 	else
   12942 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12943 
   12944 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12945 }
   12946 
   12947 static void
   12948 wm_smbustopci(struct wm_softc *sc)
   12949 {
   12950 	uint32_t fwsm, reg;
   12951 	int rv = 0;
   12952 
   12953 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12954 		device_xname(sc->sc_dev), __func__));
   12955 
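	/*
	 * Move the PHY from SMBus to PCIe (MDIO) access so the driver can
	 * talk to it; firmware may have left it in SMBus mode.
	 */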
   12956 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12957 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12958 
   12959 	/* Disable ULP */
   12960 	wm_ulp_disable(sc);
   12961 
   12962 	/* Acquire PHY semaphore */
   12963 	sc->phy.acquire(sc);
   12964 
   12965 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12966 	switch (sc->sc_type) {
   12967 	case WM_T_PCH_LPT:
   12968 	case WM_T_PCH_SPT:
   12969 		if (wm_phy_is_accessible_pchlan(sc))
   12970 			break;
   12971 
   12972 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12973 		reg |= CTRL_EXT_FORCE_SMBUS;
   12974 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12975 #if 0
   12976 		/* XXX Isn't this required??? */
   12977 		CSR_WRITE_FLUSH(sc);
   12978 #endif
   12979 		delay(50 * 1000);
   12980 		/* FALLTHROUGH */
   12981 	case WM_T_PCH2:
   12982 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12983 			break;
   12984 		/* FALLTHROUGH */
   12985 	case WM_T_PCH:
   12986 		if (sc->sc_type == WM_T_PCH)
   12987 			if ((fwsm & FWSM_FW_VALID) != 0)
   12988 				break;
   12989 
   12990 		if (wm_phy_resetisblocked(sc) == true) {
   12991 			printf("XXX reset is blocked(3)\n");
   12992 			break;
   12993 		}
   12994 
   12995 		wm_toggle_lanphypc_pch_lpt(sc);
   12996 
   12997 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12998 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12999 				break;
   13000 
   13001 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13002 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13003 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13004 
   13005 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13006 				break;
   13007 			rv = -1;
   13008 		}
   13009 		break;
   13010 	default:
   13011 		break;
   13012 	}
   13013 
   13014 	/* Release semaphore */
   13015 	sc->phy.release(sc);
   13016 
   13017 	if (rv == 0) {
   13018 		if (wm_phy_resetisblocked(sc)) {
   13019 			printf("XXX reset is blocked(4)\n");
   13020 			goto out;
   13021 		}
   13022 		wm_reset_phy(sc);
   13023 		if (wm_phy_resetisblocked(sc))
   13024 			printf("XXX reset is blocked(4)\n");
   13025 	}
   13026 
   13027 out:
   13028 	/*
   13029 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13030 	 */
   13031 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13032 		delay(10*1000);
   13033 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13034 	}
   13035 }
   13036 
   13037 static void
   13038 wm_init_manageability(struct wm_softc *sc)
   13039 {
   13040 
   13041 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13042 		device_xname(sc->sc_dev), __func__));
   13043 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13044 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13045 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13046 
   13047 		/* Disable hardware interception of ARP */
   13048 		manc &= ~MANC_ARP_EN;
   13049 
   13050 		/* Enable receiving management packets to the host */
   13051 		if (sc->sc_type >= WM_T_82571) {
   13052 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13054 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13055 		}
   13056 
   13057 		CSR_WRITE(sc, WMREG_MANC, manc);
   13058 	}
   13059 }
   13060 
   13061 static void
   13062 wm_release_manageability(struct wm_softc *sc)
   13063 {
   13064 
   13065 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13066 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13067 
   13068 		manc |= MANC_ARP_EN;
   13069 		if (sc->sc_type >= WM_T_82571)
   13070 			manc &= ~MANC_EN_MNG2HOST;
   13071 
   13072 		CSR_WRITE(sc, WMREG_MANC, manc);
   13073 	}
   13074 }
   13075 
   13076 static void
   13077 wm_get_wakeup(struct wm_softc *sc)
   13078 {
   13079 
   13080 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13081 	switch (sc->sc_type) {
   13082 	case WM_T_82573:
   13083 	case WM_T_82583:
   13084 		sc->sc_flags |= WM_F_HAS_AMT;
   13085 		/* FALLTHROUGH */
   13086 	case WM_T_80003:
   13087 	case WM_T_82575:
   13088 	case WM_T_82576:
   13089 	case WM_T_82580:
   13090 	case WM_T_I350:
   13091 	case WM_T_I354:
   13092 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13093 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13094 		/* FALLTHROUGH */
   13095 	case WM_T_82541:
   13096 	case WM_T_82541_2:
   13097 	case WM_T_82547:
   13098 	case WM_T_82547_2:
   13099 	case WM_T_82571:
   13100 	case WM_T_82572:
   13101 	case WM_T_82574:
   13102 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13103 		break;
   13104 	case WM_T_ICH8:
   13105 	case WM_T_ICH9:
   13106 	case WM_T_ICH10:
   13107 	case WM_T_PCH:
   13108 	case WM_T_PCH2:
   13109 	case WM_T_PCH_LPT:
   13110 	case WM_T_PCH_SPT:
   13111 		sc->sc_flags |= WM_F_HAS_AMT;
   13112 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13113 		break;
   13114 	default:
   13115 		break;
   13116 	}
   13117 
   13118 	/* 1: HAS_MANAGE */
   13119 	if (wm_enable_mng_pass_thru(sc) != 0)
   13120 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13121 
	/*
	 * Note that the WOL flags are set later, after the EEPROM-related
	 * parts have been reset.
	 */
   13126 }
   13127 
/*
 * Unconfigure Ultra Low Power (ULP) mode.
 * Only for PCH_LPT and newer; a few I217/I218 devices are excluded below.
 */
   13132 static void
   13133 wm_ulp_disable(struct wm_softc *sc)
   13134 {
   13135 	uint32_t reg;
   13136 	int i = 0;
   13137 
   13138 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13139 		device_xname(sc->sc_dev), __func__));
   13140 	/* Exclude old devices */
   13141 	if ((sc->sc_type < WM_T_PCH_LPT)
   13142 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13143 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13144 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13145 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13146 		return;
   13147 
   13148 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13149 		/* Request ME un-configure ULP mode in the PHY */
   13150 		reg = CSR_READ(sc, WMREG_H2ME);
   13151 		reg &= ~H2ME_ULP;
   13152 		reg |= H2ME_ENFORCE_SETTINGS;
   13153 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13154 
   13155 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13156 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13157 			if (i++ == 30) {
				device_printf(sc->sc_dev,
				    "%s timed out\n", __func__);
   13159 				return;
   13160 			}
   13161 			delay(10 * 1000);
   13162 		}
   13163 		reg = CSR_READ(sc, WMREG_H2ME);
   13164 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13165 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13166 
   13167 		return;
   13168 	}
   13169 
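	/* No valid ME firmware: exit ULP by driving the PHY directly */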
   13170 	/* Acquire semaphore */
   13171 	sc->phy.acquire(sc);
   13172 
   13173 	/* Toggle LANPHYPC */
   13174 	wm_toggle_lanphypc_pch_lpt(sc);
   13175 
   13176 	/* Unforce SMBus mode in PHY */
   13177 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13178 	if (reg == 0x0000 || reg == 0xffff) {
   13179 		uint32_t reg2;
   13180 
		device_printf(sc->sc_dev, "%s: Force SMBus first.\n",
		    __func__);
   13182 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13183 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13184 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13185 		delay(50 * 1000);
   13186 
   13187 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13188 	}
   13189 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13190 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13191 
   13192 	/* Unforce SMBus mode in MAC */
   13193 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13194 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13195 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13196 
   13197 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13198 	reg |= HV_PM_CTRL_K1_ENA;
   13199 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13200 
   13201 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13202 	reg &= ~(I218_ULP_CONFIG1_IND
   13203 	    | I218_ULP_CONFIG1_STICKY_ULP
   13204 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13205 	    | I218_ULP_CONFIG1_WOL_HOST
   13206 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13207 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13208 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13209 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13210 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
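	/* Commit the ULP configuration change by setting the START bit */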
   13211 	reg |= I218_ULP_CONFIG1_START;
   13212 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13213 
   13214 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13215 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13216 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13217 
   13218 	/* Release semaphore */
   13219 	sc->phy.release(sc);
   13220 	wm_gmii_reset(sc);
   13221 	delay(50 * 1000);
   13222 }
   13223 
/*
 * WOL in the newer chipset interfaces (pchlan).
 * XXX Not implemented yet; the required steps are outlined below.
 */
   13225 static void
   13226 wm_enable_phy_wakeup(struct wm_softc *sc)
   13227 {
   13228 #if 0
   13229 	uint16_t preg;
   13230 
   13231 	/* Copy MAC RARs to PHY RARs */
   13232 
   13233 	/* Copy MAC MTA to PHY MTA */
   13234 
   13235 	/* Configure PHY Rx Control register */
   13236 
   13237 	/* Enable PHY wakeup in MAC register */
   13238 
   13239 	/* Configure and enable PHY wakeup in PHY registers */
   13240 
   13241 	/* Activate PHY wakeup */
   13242 
   13243 	/* XXX */
   13244 #endif
   13245 }
   13246 
   13247 /* Power down workaround on D3 */
   13248 static void
   13249 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13250 {
   13251 	uint32_t reg;
   13252 	int i;
   13253 
   13254 	for (i = 0; i < 2; i++) {
   13255 		/* Disable link */
   13256 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13257 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13258 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13259 
   13260 		/*
   13261 		 * Call gig speed drop workaround on Gig disable before
   13262 		 * accessing any PHY registers
   13263 		 */
   13264 		if (sc->sc_type == WM_T_ICH8)
   13265 			wm_gig_downshift_workaround_ich8lan(sc);
   13266 
   13267 		/* Write VR power-down enable */
   13268 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13269 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13270 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13271 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13272 
   13273 		/* Read it back and test */
   13274 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13275 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13276 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13277 			break;
   13278 
   13279 		/* Issue PHY reset and repeat at most one more time */
   13280 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13281 	}
   13282 }
   13283 
   13284 static void
   13285 wm_enable_wakeup(struct wm_softc *sc)
   13286 {
   13287 	uint32_t reg, pmreg;
   13288 	pcireg_t pmode;
   13289 
   13290 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13291 		device_xname(sc->sc_dev), __func__));
   13292 
   13293 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13294 		&pmreg, NULL) == 0)
   13295 		return;
   13296 
   13297 	/* Advertise the wakeup capability */
   13298 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13299 	    | CTRL_SWDPIN(3));
   13300 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13301 
   13302 	/* ICH workaround */
   13303 	switch (sc->sc_type) {
   13304 	case WM_T_ICH8:
   13305 	case WM_T_ICH9:
   13306 	case WM_T_ICH10:
   13307 	case WM_T_PCH:
   13308 	case WM_T_PCH2:
   13309 	case WM_T_PCH_LPT:
   13310 	case WM_T_PCH_SPT:
   13311 		/* Disable gig during WOL */
   13312 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13313 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13314 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13315 		if (sc->sc_type == WM_T_PCH)
   13316 			wm_gmii_reset(sc);
   13317 
   13318 		/* Power down workaround */
   13319 		if (sc->sc_phytype == WMPHY_82577) {
   13320 			struct mii_softc *child;
   13321 
   13322 			/* Assume that the PHY is copper */
   13323 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13324 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13325 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13326 				    (768 << 5) | 25, 0x0444); /* magic num */
   13327 		}
   13328 		break;
   13329 	default:
   13330 		break;
   13331 	}
   13332 
   13333 	/* Keep the laser running on fiber adapters */
   13334 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13335 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13336 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13337 		reg |= CTRL_EXT_SWDPIN(3);
   13338 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13339 	}
   13340 
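	/* Enable wake on magic packet */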
   13341 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13342 #if 0	/* for the multicast packet */
   13343 	reg |= WUFC_MC;
   13344 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13345 #endif
   13346 
   13347 	if (sc->sc_type >= WM_T_PCH)
   13348 		wm_enable_phy_wakeup(sc);
   13349 	else {
   13350 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13351 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13352 	}
   13353 
	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		|| (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13359 
   13360 	/* Request PME */
   13361 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13362 #if 0
   13363 	/* Disable WOL */
   13364 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13365 #else
   13366 	/* For WOL */
   13367 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13368 #endif
   13369 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13370 }
   13371 
/* LPLU (Low Power Link Up) */
   13373 
   13374 static void
   13375 wm_lplu_d0_disable(struct wm_softc *sc)
   13376 {
   13377 	struct mii_data *mii = &sc->sc_mii;
   13378 	uint32_t reg;
   13379 
   13380 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13381 		device_xname(sc->sc_dev), __func__));
   13382 
   13385 	if (sc->sc_phytype == WMPHY_IFE)
   13386 		return;
   13387 
   13388 	switch (sc->sc_type) {
   13389 	case WM_T_82571:
   13390 	case WM_T_82572:
   13391 	case WM_T_82573:
   13392 	case WM_T_82575:
   13393 	case WM_T_82576:
   13394 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13395 		reg &= ~PMR_D0_LPLU;
   13396 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13397 		break;
   13398 	case WM_T_82580:
   13399 	case WM_T_I350:
   13400 	case WM_T_I210:
   13401 	case WM_T_I211:
   13402 		reg = CSR_READ(sc, WMREG_PHPM);
   13403 		reg &= ~PHPM_D0A_LPLU;
   13404 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13405 		break;
   13406 	case WM_T_82574:
   13407 	case WM_T_82583:
   13408 	case WM_T_ICH8:
   13409 	case WM_T_ICH9:
   13410 	case WM_T_ICH10:
   13411 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13412 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13413 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13414 		CSR_WRITE_FLUSH(sc);
   13415 		break;
   13416 	case WM_T_PCH:
   13417 	case WM_T_PCH2:
   13418 	case WM_T_PCH_LPT:
   13419 	case WM_T_PCH_SPT:
   13420 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13421 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13422 		if (wm_phy_resetisblocked(sc) == false)
   13423 			reg |= HV_OEM_BITS_ANEGNOW;
   13424 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13425 		break;
   13426 	default:
   13427 		break;
   13428 	}
   13429 }
   13430 
/* EEE (Energy Efficient Ethernet) */
   13432 
   13433 static void
   13434 wm_set_eee_i350(struct wm_softc *sc)
   13435 {
   13436 	uint32_t ipcnfg, eeer;
   13437 
   13438 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13439 	eeer = CSR_READ(sc, WMREG_EEER);
   13440 
   13441 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13442 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13443 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13444 		    | EEER_LPI_FC);
   13445 	} else {
   13446 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13447 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13448 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13449 		    | EEER_LPI_FC);
   13450 	}
   13451 
   13452 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13453 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13454 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13455 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13456 }
   13457 
   13458 /*
   13459  * Workarounds (mainly PHY related).
   13460  * Basically, PHY's workarounds are in the PHY drivers.
   13461  */
   13462 
/* Work-around for 82566 Kumeran PCS lock loss (XXX currently disabled) */
   13464 static void
   13465 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13466 {
   13467 #if 0
   13468 	int miistatus, active, i;
   13469 	int reg;
   13470 
   13471 	miistatus = sc->sc_mii.mii_media_status;
   13472 
   13473 	/* If the link is not up, do nothing */
   13474 	if ((miistatus & IFM_ACTIVE) == 0)
   13475 		return;
   13476 
   13477 	active = sc->sc_mii.mii_media_active;
   13478 
   13479 	/* Nothing to do if the link is other than 1Gbps */
   13480 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13481 		return;
   13482 
   13483 	for (i = 0; i < 10; i++) {
   13484 		/* read twice */
   13485 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13486 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13487 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13488 			goto out;	/* GOOD! */
   13489 
   13490 		/* Reset the PHY */
   13491 		wm_gmii_reset(sc);
   13492 		delay(5*1000);
   13493 	}
   13494 
   13495 	/* Disable GigE link negotiation */
   13496 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13497 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13498 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13499 
   13500 	/*
   13501 	 * Call gig speed drop workaround on Gig disable before accessing
   13502 	 * any PHY registers.
   13503 	 */
   13504 	wm_gig_downshift_workaround_ich8lan(sc);
   13505 
   13506 out:
   13507 	return;
   13508 #endif
   13509 }
   13510 
/* Work-around for the "WOL from S5 stops working" issue */
   13512 static void
   13513 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13514 {
   13515 	uint16_t kmrn_reg;
   13516 
   13517 	/* Only for igp3 */
   13518 	if (sc->sc_phytype == WMPHY_IGP_3) {
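		/* Briefly enable, then disable, Kumeran near-end loopback */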
   13519 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13520 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13521 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13522 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13523 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13524 	}
   13525 }
   13526 
   13527 /*
   13528  * Workaround for pch's PHYs
   13529  * XXX should be moved to new PHY driver?
   13530  */
   13531 static void
   13532 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13533 {
   13534 
   13535 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13536 		device_xname(sc->sc_dev), __func__));
   13537 	KASSERT(sc->sc_type == WM_T_PCH);
   13538 
   13539 	if (sc->sc_phytype == WMPHY_82577)
   13540 		wm_set_mdio_slow_mode_hv(sc);
   13541 
	/*
	 * XXX Not implemented:
	 * (PCH rev.2) && (82577 && (phy rev 2 or 3))
	 */

	/*
	 * XXX Not implemented:
	 * (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1)
	 */
   13545 
   13546 	/* 82578 */
   13547 	if (sc->sc_phytype == WMPHY_82578) {
   13548 		struct mii_softc *child;
   13549 
   13550 		/*
   13551 		 * Return registers to default by doing a soft reset then
   13552 		 * writing 0x3140 to the control register
   13553 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13554 		 */
   13555 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13556 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13557 			PHY_RESET(child);
   13558 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13559 			    0x3140);
   13560 		}
   13561 	}
   13562 
   13563 	/* Select page 0 */
   13564 	sc->phy.acquire(sc);
   13565 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13566 	sc->phy.release(sc);
   13567 
   13568 	/*
   13569 	 * Configure the K1 Si workaround during phy reset assuming there is
   13570 	 * link so that it disables K1 if link is in 1Gbps.
   13571 	 */
   13572 	wm_k1_gig_workaround_hv(sc, 1);
   13573 }
   13574 
   13575 static void
   13576 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13577 {
   13578 
   13579 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13580 		device_xname(sc->sc_dev), __func__));
   13581 	KASSERT(sc->sc_type == WM_T_PCH2);
   13582 
   13583 	wm_set_mdio_slow_mode_hv(sc);
   13584 }
   13585 
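/*
 * K1 Si workaround: decide whether K1 (a power-saving idle state) stays
 * enabled, disabling it while a 1Gbps link is up, and apply the link
 * stall fix.
 */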
   13586 static int
   13587 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13588 {
   13589 	int k1_enable = sc->sc_nvm_k1_enabled;
   13590 
   13591 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13592 		device_xname(sc->sc_dev), __func__));
   13593 
   13594 	if (sc->phy.acquire(sc) != 0)
   13595 		return -1;
   13596 
	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
	}
   13606 
   13607 	wm_configure_k1_ich8lan(sc, k1_enable);
   13608 	sc->phy.release(sc);
   13609 
   13610 	return 0;
   13611 }
   13612 
   13613 static void
   13614 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13615 {
   13616 	uint32_t reg;
   13617 
   13618 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13619 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13620 	    reg | HV_KMRN_MDIO_SLOW);
   13621 }
   13622 
   13623 static void
   13624 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13625 {
   13626 	uint32_t ctrl, ctrl_ext, tmp;
   13627 	uint16_t kmrn_reg;
   13628 
   13629 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13630 
   13631 	if (k1_enable)
   13632 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13633 	else
   13634 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13635 
   13636 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13637 
   13638 	delay(20);
   13639 
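	/*
	 * Briefly force the MAC speed configuration (with PHY speed bypass)
	 * so that the new K1 setting takes effect, then restore the original
	 * CTRL and CTRL_EXT values.
	 */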
   13640 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13641 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13642 
   13643 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13644 	tmp |= CTRL_FRCSPD;
   13645 
   13646 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13647 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13648 	CSR_WRITE_FLUSH(sc);
   13649 	delay(20);
   13650 
   13651 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13652 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13653 	CSR_WRITE_FLUSH(sc);
   13654 	delay(20);
   13655 }
   13656 
   13657 /* special case - for 82575 - need to do manual init ... */
   13658 static void
   13659 wm_reset_init_script_82575(struct wm_softc *sc)
   13660 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
   13665 
   13666 	/* SerDes configuration via SERDESCTRL */
   13667 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13668 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13669 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13670 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13671 
   13672 	/* CCM configuration via CCMCTL register */
   13673 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13674 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13675 
   13676 	/* PCIe lanes configuration */
   13677 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13678 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13679 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13680 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13681 
   13682 	/* PCIe PLL Configuration */
   13683 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13684 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13685 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13686 }
   13687 
   13688 static void
   13689 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13690 {
   13691 	uint32_t reg;
   13692 	uint16_t nvmword;
   13693 	int rv;
   13694 
   13695 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13696 		return;
   13697 
   13698 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13699 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13700 	if (rv != 0) {
   13701 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13702 		    __func__);
   13703 		return;
   13704 	}
   13705 
   13706 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13707 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13708 		reg |= MDICNFG_DEST;
   13709 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13710 		reg |= MDICNFG_COM_MDIO;
   13711 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13712 }
   13713 
   13714 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13715 
   13716 static bool
   13717 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13718 {
   13719 	int i;
   13720 	uint32_t reg;
   13721 	uint16_t id1, id2;
   13722 
   13723 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13724 		device_xname(sc->sc_dev), __func__));
   13725 	id1 = id2 = 0xffff;
   13726 	for (i = 0; i < 2; i++) {
   13727 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13728 		if (MII_INVALIDID(id1))
   13729 			continue;
   13730 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13731 		if (MII_INVALIDID(id2))
   13732 			continue;
   13733 		break;
   13734 	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
   13738 
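	/*
	 * In case the PHY needs to be in MDIO slow mode, set slow mode and
	 * try to read the PHY ID again.
	 */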
   13739 	if (sc->sc_type < WM_T_PCH_LPT) {
   13740 		sc->phy.release(sc);
   13741 		wm_set_mdio_slow_mode_hv(sc);
   13742 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13743 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13744 		sc->phy.acquire(sc);
   13745 	}
	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
   13750 out:
   13751 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13752 		/* Only unforce SMBus if ME is not active */
   13753 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13754 			/* Unforce SMBus mode in PHY */
   13755 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13756 			    CV_SMB_CTRL);
   13757 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13758 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13759 			    CV_SMB_CTRL, reg);
   13760 
   13761 			/* Unforce SMBus mode in MAC */
   13762 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13763 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13764 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13765 		}
   13766 	}
   13767 	return true;
   13768 }
   13769 
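/*
 * Toggle the LANPHYPC pin, forcing the PHY out of its current (e.g.
 * forced-SMBus) mode; callers use this when the PHY is inaccessible.
 */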
   13770 static void
   13771 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13772 {
   13773 	uint32_t reg;
   13774 	int i;
   13775 
   13776 	/* Set PHY Config Counter to 50msec */
   13777 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13778 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13779 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13780 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13781 
   13782 	/* Toggle LANPHYPC */
   13783 	reg = CSR_READ(sc, WMREG_CTRL);
   13784 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13785 	reg &= ~CTRL_LANPHYPC_VALUE;
   13786 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13787 	CSR_WRITE_FLUSH(sc);
   13788 	delay(1000);
   13789 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13790 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13791 	CSR_WRITE_FLUSH(sc);
   13792 
   13793 	if (sc->sc_type < WM_T_PCH_LPT)
   13794 		delay(50 * 1000);
   13795 	else {
   13796 		i = 20;
   13797 
   13798 		do {
   13799 			delay(5 * 1000);
   13800 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13801 		    && i--);
   13802 
   13803 		delay(30 * 1000);
   13804 	}
   13805 }
   13806 
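/*
 * Set up PCIe Latency Tolerance Reporting (LTR) and the OBFF (optimized
 * buffer flush/fill) high water mark for platform power management.
 */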
   13807 static int
   13808 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13809 {
   13810 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13811 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13812 	uint32_t rxa;
   13813 	uint16_t scale = 0, lat_enc = 0;
   13814 	int32_t obff_hwm = 0;
   13815 	int64_t lat_ns, value;
   13816 
   13817 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13818 		device_xname(sc->sc_dev), __func__));
   13819 
   13820 	if (link) {
   13821 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13822 		uint32_t status;
   13823 		uint16_t speed;
   13824 		pcireg_t preg;
   13825 
   13826 		status = CSR_READ(sc, WMREG_STATUS);
   13827 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13828 		case STATUS_SPEED_10:
   13829 			speed = 10;
   13830 			break;
   13831 		case STATUS_SPEED_100:
   13832 			speed = 100;
   13833 			break;
   13834 		case STATUS_SPEED_1000:
   13835 			speed = 1000;
   13836 			break;
   13837 		default:
   13838 			device_printf(sc->sc_dev, "Unknown speed "
   13839 			    "(status = %08x)\n", status);
   13840 			return -1;
   13841 		}
   13842 
   13843 		/* Rx Packet Buffer Allocation size (KB) */
   13844 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13845 
   13846 		/*
   13847 		 * Determine the maximum latency tolerated by the device.
   13848 		 *
   13849 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13850 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13851 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13852 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13853 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13854 		 */
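		/*
		 * For example, scale = 2 and value = 3 encode a tolerated
		 * latency of 3 * 2^10 ns = 3072 ns.  The drain time computed
		 * below is bytes * 8 * 1000 / speed (in Mb/s), i.e. ns.
		 */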
   13855 		lat_ns = ((int64_t)rxa * 1024 -
   13856 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   13857 			+ ETHER_HDR_LEN))) * 8 * 1000;
   13858 		if (lat_ns < 0)
   13859 			lat_ns = 0;
   13860 		else
   13861 			lat_ns /= speed;
   13862 		value = lat_ns;
   13863 
		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
   13873 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13874 
   13875 		/* Determine the maximum latency tolerated by the platform */
   13876 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13877 		    WM_PCI_LTR_CAP_LPT);
   13878 		max_snoop = preg & 0xffff;
   13879 		max_nosnoop = preg >> 16;
   13880 
   13881 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13882 
   13883 		if (lat_enc > max_ltr_enc) {
   13884 			lat_enc = max_ltr_enc;
   13885 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   13886 			    * PCI_LTR_SCALETONS(
   13887 				    __SHIFTOUT(lat_enc,
   13888 					PCI_LTR_MAXSNOOPLAT_SCALE));
   13889 		}
   13890 
   13891 		if (lat_ns) {
   13892 			lat_ns *= speed * 1000;
   13893 			lat_ns /= 8;
   13894 			lat_ns /= 1000000000;
   13895 			obff_hwm = (int32_t)(rxa - lat_ns);
   13896 		}
   13897 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   13901 			return -1;
   13902 		}
   13903 	}
   13904 	/* Snoop and No-Snoop latencies the same */
   13905 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13906 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13907 
   13908 	/* Set OBFF high water mark */
   13909 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   13910 	reg |= obff_hwm;
   13911 	CSR_WRITE(sc, WMREG_SVT, reg);
   13912 
   13913 	/* Enable OBFF */
   13914 	reg = CSR_READ(sc, WMREG_SVCR);
   13915 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   13916 	CSR_WRITE(sc, WMREG_SVCR, reg);
   13917 
   13918 	return 0;
   13919 }
   13920 
   13921 /*
   13922  * I210 Errata 25 and I211 Errata 10
   13923  * Slow System Clock.
   13924  */
   13925 static void
   13926 wm_pll_workaround_i210(struct wm_softc *sc)
   13927 {
   13928 	uint32_t mdicnfg, wuc;
   13929 	uint32_t reg;
   13930 	pcireg_t pcireg;
   13931 	uint32_t pmreg;
   13932 	uint16_t nvmword, tmp_nvmword;
   13933 	int phyval;
   13934 	bool wa_done = false;
   13935 	int i;
   13936 
   13937 	/* Save WUC and MDICNFG registers */
   13938 	wuc = CSR_READ(sc, WMREG_WUC);
   13939 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13940 
   13941 	reg = mdicnfg & ~MDICNFG_DEST;
   13942 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13943 
   13944 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13945 		nvmword = INVM_DEFAULT_AL;
   13946 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13947 
	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0) {
		/* Restore MDICNFG before bailing out */
		CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
		return;
	}
   13952 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13953 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13954 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13955 
   13956 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13957 			break; /* OK */
   13958 		}
   13959 
   13960 		wa_done = true;
   13961 		/* Directly reset the internal PHY */
   13962 		reg = CSR_READ(sc, WMREG_CTRL);
   13963 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13964 
   13965 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13966 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13967 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13968 
   13969 		CSR_WRITE(sc, WMREG_WUC, 0);
   13970 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13971 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13972 
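		/*
		 * Cycle the device through D3hot and back to D0; the PLL
		 * state is then re-checked on the next loop iteration.
		 */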
   13973 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13974 		    pmreg + PCI_PMCSR);
   13975 		pcireg |= PCI_PMCSR_STATE_D3;
   13976 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13977 		    pmreg + PCI_PMCSR, pcireg);
   13978 		delay(1000);
   13979 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13980 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13981 		    pmreg + PCI_PMCSR, pcireg);
   13982 
   13983 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13984 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13985 
   13986 		/* Restore WUC register */
   13987 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13988 	}
   13989 
   13990 	/* Restore MDICNFG setting */
   13991 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13992 	if (wa_done)
   13993 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13994 }
   13995 
   13996 static void
   13997 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   13998 {
   13999 	uint32_t reg;
   14000 
   14001 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14002 		device_xname(sc->sc_dev), __func__));
   14003 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14004 
   14005 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14006 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14007 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14008 
   14009 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14010 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14011 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14012 }
   14013