/*	$NetBSD: if_wm.c,v 1.521 2017/07/13 07:50:49 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.521 2017/07/13 07:50:49 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
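
/*
 * Illustrative sketch, not part of the driver: DPRINTF()'s second
 * argument is a complete, parenthesized printf() argument list, which
 * is how a variable number of arguments passes through the
 * two-parameter macro above.  A hypothetical call site:
 */
#if 0
	DPRINTF(WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
#endif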

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * Maximum number of interrupts used by this device driver.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;
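
/*
 * A hedged configuration sketch: the #ifndef guards above mean both
 * knobs can be overridden at kernel build time, e.g. (assuming the
 * options are declared so they land in opt_if_wm.h):
 *
 *	options 	WM_DISABLE_MSI=1
 *	options 	WM_DISABLE_MSIX=1
 */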

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
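
/*
 * Illustrative sketch, not part of the driver: since the ring sizes
 * are powers of two, "(x + 1) & (size - 1)" advances an index with a
 * cheap mask instead of a modulo.  With WM_NTXDESC_82544 (4096)
 * descriptors:
 *
 *	WM_NEXTTX(txq, 4094) == 4095
 *	WM_NEXTTX(txq, 4095) == 0	(wraps to the start of the ring)
 */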

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif
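
/*
 * A hedged note on the two limits above: sc_rx_process_limit bounds
 * how many Rx descriptors one softint pass handles, and
 * sc_rx_intr_process_limit does the same in hard interrupt context
 * (with the default of 0, Rx work is deferred entirely to the
 * softint).  Both are #ifndef-guarded, so a build can override them,
 * e.g. (assuming the option is wired up for opt_if_wm.h):
 *
 *	options 	WM_RX_PROCESS_LIMIT_DEFAULT=200
 */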

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC];	/* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC];	/* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
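
/*
 * Illustrative note, not part of the driver logic: the table above is
 * indexed by LAN function number, so swfwphysem[sc->sc_funcid] selects
 * the SWFW_PHY[0-3]_SM semaphore owned by this port.
 */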

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (/*CONSTCOND*/0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
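
/*
 * Illustrative expansion, not part of the driver: for a member
 * declared with WM_Q_EVCNT_DEFINE(rxq, rxintr), the macros above
 * produce roughly
 *
 *	char rxq_rxintr_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt rxq_ev_rxintr;
 *
 * and WM_Q_EVCNT_ATTACH() formats "rxq00rxintr" (for queue 0) into
 * that buffer before handing it to evcnt_attach_dynamic().  Note that
 * the buffer size comes from the literal string "qname##XX##evname"
 * -- '##' is not expanded inside a string literal -- which is simply
 * long enough for the generated names.
 */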

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between those CPUs without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};
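
/*
 * Illustrative sketch, not part of the driver: the acquire/release
 * hooks let chip-independent code bracket PHY register access with
 * whatever semaphore the chip requires, along the lines of:
 */
#if 0
	if (sc->phy.acquire(sc) != 0)
		return;
	/* ... access PHY registers ... */
	sc->phy.release(sc);
#endif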

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
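
/*
 * Illustrative sketch, not part of the driver: the macros above
 * tolerate a NULL sc_core_lock, and WM_CORE_LOCKED() is written so
 * assertions still hold in that case:
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify softc state ... */
	WM_CORE_UNLOCK(sc);
#endif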

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
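
/*
 * Illustrative trace, not part of the driver: rxq_tailp always points
 * at the m_next slot that terminates the chain, so WM_RXCHAIN_LINK()
 * appends an mbuf in O(1) without walking the list:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */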

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
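
/*
 * Illustrative sketch, not part of the driver: PCI writes may be
 * posted, so CSR_WRITE_FLUSH() reads the harmless STATUS register to
 * force preceding writes out to the device, e.g. before a timed delay:
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	CSR_WRITE_FLUSH(sc);
	delay(100);
#endif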

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
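
/*
 * Illustrative note, not part of the driver: descriptor base addresses
 * are programmed as two 32-bit halves, so a 64-bit bus_addr_t such as
 * 0x0000000123456780 splits into WM_CDTXADDR_LO() == 0x23456780 and
 * WM_CDTXADDR_HI() == 0x1; on platforms with a 32-bit bus_addr_t the
 * high half is constant 0.
 */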

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
   1333 	  WM_T_82580,		WMP_F_SERDES },
   1334 
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1336 	  "82580 gigabit Ethernet (SGMII)",
   1337 	  WM_T_82580,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1339 	  "82580 dual-1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1343 	  "82580 quad-1000BaseX Ethernet",
   1344 	  WM_T_82580,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1347 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1348 	  WM_T_82580,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1351 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1352 	  WM_T_82580,		WMP_F_SERDES },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1355 	  "DH89XXCC 1000BASE-KX Ethernet",
   1356 	  WM_T_82580,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1359 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1360 	  WM_T_82580,		WMP_F_SERDES },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1363 	  "I350 Gigabit Network Connection",
   1364 	  WM_T_I350,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1367 	  "I350 Gigabit Fiber Network Connection",
   1368 	  WM_T_I350,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1371 	  "I350 Gigabit Backplane Connection",
   1372 	  WM_T_I350,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1375 	  "I350 Quad Port Gigabit Ethernet",
   1376 	  WM_T_I350,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1379 	  "I350 Gigabit Connection",
   1380 	  WM_T_I350,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1383 	  "I354 Gigabit Ethernet (KX)",
   1384 	  WM_T_I354,		WMP_F_SERDES },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1387 	  "I354 Gigabit Ethernet (SGMII)",
   1388 	  WM_T_I354,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1391 	  "I354 Gigabit Ethernet (2.5G)",
   1392 	  WM_T_I354,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1395 	  "I210-T1 Ethernet Server Adapter",
   1396 	  WM_T_I210,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1399 	  "I210 Ethernet (Copper OEM)",
   1400 	  WM_T_I210,		WMP_F_COPPER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1403 	  "I210 Ethernet (Copper IT)",
   1404 	  WM_T_I210,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1407 	  "I210 Ethernet (FLASH less)",
   1408 	  WM_T_I210,		WMP_F_COPPER },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1411 	  "I210 Gigabit Ethernet (Fiber)",
   1412 	  WM_T_I210,		WMP_F_FIBER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1415 	  "I210 Gigabit Ethernet (SERDES)",
   1416 	  WM_T_I210,		WMP_F_SERDES },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1419 	  "I210 Gigabit Ethernet (FLASH less)",
   1420 	  WM_T_I210,		WMP_F_SERDES },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1423 	  "I210 Gigabit Ethernet (SGMII)",
   1424 	  WM_T_I210,		WMP_F_COPPER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1427 	  "I211 Ethernet (COPPER)",
   1428 	  WM_T_I211,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1430 	  "I217 V Ethernet Connection",
   1431 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1433 	  "I217 LM Ethernet Connection",
   1434 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1436 	  "I218 V Ethernet Connection",
   1437 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1439 	  "I218 V Ethernet Connection",
   1440 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1442 	  "I218 V Ethernet Connection",
   1443 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1445 	  "I218 LM Ethernet Connection",
   1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1448 	  "I218 LM Ethernet Connection",
   1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1451 	  "I218 LM Ethernet Connection",
   1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1453 #if 0
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1455 	  "I219 V Ethernet Connection",
   1456 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1458 	  "I219 V Ethernet Connection",
   1459 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1461 	  "I219 V Ethernet Connection",
   1462 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1464 	  "I219 V Ethernet Connection",
   1465 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1467 	  "I219 LM Ethernet Connection",
   1468 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1470 	  "I219 LM Ethernet Connection",
   1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1473 	  "I219 LM Ethernet Connection",
   1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1476 	  "I219 LM Ethernet Connection",
   1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1479 	  "I219 LM Ethernet Connection",
   1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1481 #endif
   1482 	{ 0,			0,
   1483 	  NULL,
   1484 	  0,			0 },
   1485 };
   1486 
   1487 /*
   1488  * Register read/write functions.
   1489  * Other than CSR_{READ|WRITE}().
   1490  */
   1491 
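/*
 * wm_io_read() and wm_io_write() below use the I/O BAR as an indirect
 * register window: the target register offset is written at I/O offset 0
 * and the data is then transferred through I/O offset 4 (the IOADDR and
 * IODATA registers of the i8254x family).
 */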
   1492 #if 0 /* Not currently used */
   1493 static inline uint32_t
   1494 wm_io_read(struct wm_softc *sc, int reg)
   1495 {
   1496 
   1497 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1498 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1499 }
   1500 #endif
   1501 
   1502 static inline void
   1503 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1504 {
   1505 
   1506 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1507 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1508 }
   1509 
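/*
 * wm_82575_write_8bit_ctlr_reg() writes one byte of data through an
 * 82575-style serialized control register (e.g. SCTL): the data byte and
 * the destination offset are packed into a single CSR write, after which
 * the register is polled until the hardware sets the READY bit or the
 * SCTL_CTL_POLL_TIMEOUT limit (in 5us steps) is reached.
 */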
   1510 static inline void
   1511 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1512     uint32_t data)
   1513 {
   1514 	uint32_t regval;
   1515 	int i;
   1516 
   1517 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1518 
   1519 	CSR_WRITE(sc, reg, regval);
   1520 
   1521 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1522 		delay(5);
   1523 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1524 			break;
   1525 	}
   1526 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1527 		aprint_error("%s: WARNING:"
   1528 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1529 		    device_xname(sc->sc_dev), reg);
   1530 	}
   1531 }
   1532 
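/*
 * wm_set_dma_addr() stores a bus address into the two little-endian
 * 32-bit halves of a wiseman_addr_t.  On platforms with a 32-bit
 * bus_addr_t the high word is simply zeroed.
 */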
   1533 static inline void
   1534 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1535 {
   1536 	wa->wa_low = htole32(v & 0xffffffffU);
   1537 	if (sizeof(bus_addr_t) == 8)
   1538 		wa->wa_high = htole32((uint64_t) v >> 32);
   1539 	else
   1540 		wa->wa_high = 0;
   1541 }
   1542 
   1543 /*
   1544  * Descriptor sync/init functions.
   1545  */
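/*
 * wm_cdtxsync() may need two bus_dmamap_sync() calls because the range
 * [start, start + num) can wrap around the end of the ring.  For example,
 * with WM_NTXDESC(txq) == 256, start == 250 and num == 10, the first call
 * covers descriptors 250-255 and the second covers descriptors 0-3.
 */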
   1546 static inline void
   1547 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1548 {
   1549 	struct wm_softc *sc = txq->txq_sc;
   1550 
   1551 	/* If it will wrap around, sync to the end of the ring. */
   1552 	if ((start + num) > WM_NTXDESC(txq)) {
   1553 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1554 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1555 		    (WM_NTXDESC(txq) - start), ops);
   1556 		num -= (WM_NTXDESC(txq) - start);
   1557 		start = 0;
   1558 	}
   1559 
   1560 	/* Now sync whatever is left. */
   1561 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1562 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1563 }
   1564 
   1565 static inline void
   1566 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1567 {
   1568 	struct wm_softc *sc = rxq->rxq_sc;
   1569 
   1570 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1571 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1572 }
   1573 
   1574 static inline void
   1575 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1576 {
   1577 	struct wm_softc *sc = rxq->rxq_sc;
   1578 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1579 	struct mbuf *m = rxs->rxs_mbuf;
   1580 
   1581 	/*
   1582 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1583 	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
   1587 	 * The stupid chip uses the same size for every buffer, which
   1588 	 * is set in the Receive Control register.  We are using the 2K
   1589 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1590 	 * reason, we can't "scoot" packets longer than the standard
   1591 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1592 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1593 	 * the upper layer copy the headers.
   1594 	 */
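	/*
	 * As a concrete example: a standard 1518-byte frame still fits in
	 * the 2K buffer after the 2-byte shift (1518 + 2 <= 2048), so
	 * sc_align_tweak is 2 in that case; for larger frames the driver
	 * sets sc_align_tweak to 0, as described above.
	 */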
   1595 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1596 
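	/*
	 * Three receive descriptor layouts are handled here: the 82574
	 * uses extended descriptors, NEWQUEUE (82575 and newer) devices
	 * use the "nq" advanced descriptors, and all other chips use the
	 * legacy wiseman layout.
	 */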
   1597 	if (sc->sc_type == WM_T_82574) {
   1598 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1599 		rxd->erx_data.erxd_addr =
   1600 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1601 		rxd->erx_data.erxd_dd = 0;
   1602 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1603 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1604 
   1605 		rxd->nqrx_data.nrxd_paddr =
   1606 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1607 		/* Currently, split header is not supported. */
   1608 		rxd->nqrx_data.nrxd_haddr = 0;
   1609 	} else {
   1610 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1611 
   1612 		wm_set_dma_addr(&rxd->wrx_addr,
   1613 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1614 		rxd->wrx_len = 0;
   1615 		rxd->wrx_cksum = 0;
   1616 		rxd->wrx_status = 0;
   1617 		rxd->wrx_errors = 0;
   1618 		rxd->wrx_special = 0;
   1619 	}
   1620 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1621 
   1622 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1623 }
   1624 
   1625 /*
   1626  * Device driver interface functions and commonly used functions.
   1627  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1628  */
   1629 
   1630 /* Lookup supported device table */
   1631 static const struct wm_product *
   1632 wm_lookup(const struct pci_attach_args *pa)
   1633 {
   1634 	const struct wm_product *wmp;
   1635 
   1636 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1637 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1638 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1639 			return wmp;
   1640 	}
   1641 	return NULL;
   1642 }
   1643 
   1644 /* The match function (ca_match) */
   1645 static int
   1646 wm_match(device_t parent, cfdata_t cf, void *aux)
   1647 {
   1648 	struct pci_attach_args *pa = aux;
   1649 
   1650 	if (wm_lookup(pa) != NULL)
   1651 		return 1;
   1652 
   1653 	return 0;
   1654 }
   1655 
   1656 /* The attach function (ca_attach) */
   1657 static void
   1658 wm_attach(device_t parent, device_t self, void *aux)
   1659 {
   1660 	struct wm_softc *sc = device_private(self);
   1661 	struct pci_attach_args *pa = aux;
   1662 	prop_dictionary_t dict;
   1663 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1664 	pci_chipset_tag_t pc = pa->pa_pc;
   1665 	int counts[PCI_INTR_TYPE_SIZE];
   1666 	pci_intr_type_t max_type;
   1667 	const char *eetype, *xname;
   1668 	bus_space_tag_t memt;
   1669 	bus_space_handle_t memh;
   1670 	bus_size_t memsize;
   1671 	int memh_valid;
   1672 	int i, error;
   1673 	const struct wm_product *wmp;
   1674 	prop_data_t ea;
   1675 	prop_number_t pn;
   1676 	uint8_t enaddr[ETHER_ADDR_LEN];
   1677 	char buf[256];
   1678 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1679 	pcireg_t preg, memtype;
   1680 	uint16_t eeprom_data, apme_mask;
   1681 	bool force_clear_smbi;
   1682 	uint32_t link_mode;
   1683 	uint32_t reg;
   1684 
   1685 	sc->sc_dev = self;
   1686 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1687 	sc->sc_core_stopping = false;
   1688 
   1689 	wmp = wm_lookup(pa);
   1690 #ifdef DIAGNOSTIC
   1691 	if (wmp == NULL) {
   1692 		printf("\n");
   1693 		panic("wm_attach: impossible");
   1694 	}
   1695 #endif
   1696 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1697 
   1698 	sc->sc_pc = pa->pa_pc;
   1699 	sc->sc_pcitag = pa->pa_tag;
   1700 
   1701 	if (pci_dma64_available(pa))
   1702 		sc->sc_dmat = pa->pa_dmat64;
   1703 	else
   1704 		sc->sc_dmat = pa->pa_dmat;
   1705 
   1706 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1707 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1708 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1709 
   1710 	sc->sc_type = wmp->wmp_type;
   1711 
   1712 	/* Set default function pointers */
   1713 	sc->phy.acquire = wm_get_null;
   1714 	sc->phy.release = wm_put_null;
   1715 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1716 
   1717 	if (sc->sc_type < WM_T_82543) {
   1718 		if (sc->sc_rev < 2) {
   1719 			aprint_error_dev(sc->sc_dev,
   1720 			    "i82542 must be at least rev. 2\n");
   1721 			return;
   1722 		}
   1723 		if (sc->sc_rev < 3)
   1724 			sc->sc_type = WM_T_82542_2_0;
   1725 	}
   1726 
   1727 	/*
   1728 	 * Disable MSI for Errata:
   1729 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1730 	 *
   1731 	 *  82544: Errata 25
   1732 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1733 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1734 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1735 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1736 	 *
   1737 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1738 	 *
   1739 	 *  82571 & 82572: Errata 63
   1740 	 */
   1741 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1742 	    || (sc->sc_type == WM_T_82572))
   1743 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1744 
   1745 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1746 	    || (sc->sc_type == WM_T_82580)
   1747 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1748 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1749 		sc->sc_flags |= WM_F_NEWQUEUE;
   1750 
   1751 	/* Set device properties (mactype) */
   1752 	dict = device_properties(sc->sc_dev);
   1753 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1754 
   1755 	/*
	 * Map the device.  All devices support memory-mapped access,
   1757 	 * and it is really required for normal operation.
   1758 	 */
   1759 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1760 	switch (memtype) {
   1761 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1762 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1763 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1764 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1765 		break;
   1766 	default:
   1767 		memh_valid = 0;
   1768 		break;
   1769 	}
   1770 
   1771 	if (memh_valid) {
   1772 		sc->sc_st = memt;
   1773 		sc->sc_sh = memh;
   1774 		sc->sc_ss = memsize;
   1775 	} else {
   1776 		aprint_error_dev(sc->sc_dev,
   1777 		    "unable to map device registers\n");
   1778 		return;
   1779 	}
   1780 
   1781 	/*
   1782 	 * In addition, i82544 and later support I/O mapped indirect
   1783 	 * register access.  It is not desirable (nor supported in
   1784 	 * this driver) to use it for normal operation, though it is
   1785 	 * required to work around bugs in some chip versions.
   1786 	 */
   1787 	if (sc->sc_type >= WM_T_82544) {
   1788 		/* First we have to find the I/O BAR. */
   1789 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1790 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1791 			if (memtype == PCI_MAPREG_TYPE_IO)
   1792 				break;
   1793 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1794 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1795 				i += 4;	/* skip high bits, too */
   1796 		}
   1797 		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because the newer chips
			 * don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it hasn't been
			 * configured.
			 */
   1808 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1809 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1810 				aprint_error_dev(sc->sc_dev,
   1811 				    "WARNING: I/O BAR at zero.\n");
   1812 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1813 					0, &sc->sc_iot, &sc->sc_ioh,
   1814 					NULL, &sc->sc_ios) == 0) {
   1815 				sc->sc_flags |= WM_F_IOH_VALID;
   1816 			} else {
   1817 				aprint_error_dev(sc->sc_dev,
   1818 				    "WARNING: unable to map I/O space\n");
   1819 			}
   1820 		}
		}
	}
   1824 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1825 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1826 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1827 	if (sc->sc_type < WM_T_82542_2_1)
   1828 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1829 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1830 
   1831 	/* power up chip */
   1832 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1833 	    NULL)) && error != EOPNOTSUPP) {
   1834 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1835 		return;
   1836 	}
   1837 
   1838 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1839 
   1840 	/* Allocation settings */
   1841 	max_type = PCI_INTR_TYPE_MSIX;
   1842 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1843 	counts[PCI_INTR_TYPE_MSI] = 1;
   1844 	counts[PCI_INTR_TYPE_INTX] = 1;
   1845 	/* overridden by disable flags */
   1846 	if (wm_disable_msi != 0) {
   1847 		counts[PCI_INTR_TYPE_MSI] = 0;
   1848 		if (wm_disable_msix != 0) {
   1849 			max_type = PCI_INTR_TYPE_INTX;
   1850 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1851 		}
   1852 	} else if (wm_disable_msix != 0) {
   1853 		max_type = PCI_INTR_TYPE_MSI;
   1854 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1855 	}
   1856 
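	/*
	 * Interrupt allocation falls back in stages: if MSI-X setup
	 * fails, the vectors are released and the allocation is retried
	 * with MSI; if MSI setup fails in turn, it is retried once more
	 * with INTx.
	 */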
   1857 alloc_retry:
   1858 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1859 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1860 		return;
   1861 	}
   1862 
   1863 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1864 		error = wm_setup_msix(sc);
   1865 		if (error) {
   1866 			pci_intr_release(pc, sc->sc_intrs,
   1867 			    counts[PCI_INTR_TYPE_MSIX]);
   1868 
   1869 			/* Setup for MSI: Disable MSI-X */
   1870 			max_type = PCI_INTR_TYPE_MSI;
   1871 			counts[PCI_INTR_TYPE_MSI] = 1;
   1872 			counts[PCI_INTR_TYPE_INTX] = 1;
   1873 			goto alloc_retry;
   1874 		}
	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1876 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1877 		error = wm_setup_legacy(sc);
   1878 		if (error) {
   1879 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1880 			    counts[PCI_INTR_TYPE_MSI]);
   1881 
   1882 			/* The next try is for INTx: Disable MSI */
   1883 			max_type = PCI_INTR_TYPE_INTX;
   1884 			counts[PCI_INTR_TYPE_INTX] = 1;
   1885 			goto alloc_retry;
   1886 		}
   1887 	} else {
   1888 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1889 		error = wm_setup_legacy(sc);
   1890 		if (error) {
   1891 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1892 			    counts[PCI_INTR_TYPE_INTX]);
   1893 			return;
   1894 		}
   1895 	}
   1896 
   1897 	/*
   1898 	 * Check the function ID (unit number of the chip).
   1899 	 */
   1900 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1902 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1903 	    || (sc->sc_type == WM_T_82580)
   1904 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1905 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1906 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1907 	else
   1908 		sc->sc_funcid = 0;
   1909 
   1910 	/*
   1911 	 * Determine a few things about the bus we're connected to.
   1912 	 */
   1913 	if (sc->sc_type < WM_T_82543) {
   1914 		/* We don't really know the bus characteristics here. */
   1915 		sc->sc_bus_speed = 33;
   1916 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1917 		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
   1920 		 */
   1921 		sc->sc_flags |= WM_F_CSA;
   1922 		sc->sc_bus_speed = 66;
   1923 		aprint_verbose_dev(sc->sc_dev,
   1924 		    "Communication Streaming Architecture\n");
   1925 		if (sc->sc_type == WM_T_82547) {
   1926 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1927 			callout_setfunc(&sc->sc_txfifo_ch,
   1928 					wm_82547_txfifo_stall, sc);
   1929 			aprint_verbose_dev(sc->sc_dev,
   1930 			    "using 82547 Tx FIFO stall work-around\n");
   1931 		}
   1932 	} else if (sc->sc_type >= WM_T_82571) {
   1933 		sc->sc_flags |= WM_F_PCIE;
   1934 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1935 		    && (sc->sc_type != WM_T_ICH10)
   1936 		    && (sc->sc_type != WM_T_PCH)
   1937 		    && (sc->sc_type != WM_T_PCH2)
   1938 		    && (sc->sc_type != WM_T_PCH_LPT)
   1939 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1940 			/* ICH* and PCH* have no PCIe capability registers */
   1941 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1942 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1943 				NULL) == 0)
   1944 				aprint_error_dev(sc->sc_dev,
   1945 				    "unable to find PCIe capability\n");
   1946 		}
   1947 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1948 	} else {
   1949 		reg = CSR_READ(sc, WMREG_STATUS);
   1950 		if (reg & STATUS_BUS64)
   1951 			sc->sc_flags |= WM_F_BUS64;
   1952 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1953 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1954 
   1955 			sc->sc_flags |= WM_F_PCIX;
   1956 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1957 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIX capability\n");
   1960 			else if (sc->sc_type != WM_T_82545_3 &&
   1961 				 sc->sc_type != WM_T_82546_3) {
   1962 				/*
   1963 				 * Work around a problem caused by the BIOS
   1964 				 * setting the max memory read byte count
   1965 				 * incorrectly.
   1966 				 */
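				/*
				 * Both the command and status fields
				 * encode the byte count as 512 << n, so
				 * an MMRBC larger than the advertised
				 * maximum is clamped down to that maximum
				 * below.
				 */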
   1967 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1968 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1969 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1970 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1971 
   1972 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1973 				    PCIX_CMD_BYTECNT_SHIFT;
   1974 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1975 				    PCIX_STATUS_MAXB_SHIFT;
   1976 				if (bytecnt > maxb) {
   1977 					aprint_verbose_dev(sc->sc_dev,
   1978 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1979 					    512 << bytecnt, 512 << maxb);
   1980 					pcix_cmd = (pcix_cmd &
   1981 					    ~PCIX_CMD_BYTECNT_MASK) |
   1982 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1983 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1984 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1985 					    pcix_cmd);
   1986 				}
   1987 			}
   1988 		}
   1989 		/*
   1990 		 * The quad port adapter is special; it has a PCIX-PCIX
   1991 		 * bridge on the board, and can run the secondary bus at
   1992 		 * a higher speed.
   1993 		 */
   1994 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1995 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1996 								      : 66;
   1997 		} else if (sc->sc_flags & WM_F_PCIX) {
   1998 			switch (reg & STATUS_PCIXSPD_MASK) {
   1999 			case STATUS_PCIXSPD_50_66:
   2000 				sc->sc_bus_speed = 66;
   2001 				break;
   2002 			case STATUS_PCIXSPD_66_100:
   2003 				sc->sc_bus_speed = 100;
   2004 				break;
   2005 			case STATUS_PCIXSPD_100_133:
   2006 				sc->sc_bus_speed = 133;
   2007 				break;
   2008 			default:
   2009 				aprint_error_dev(sc->sc_dev,
   2010 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2011 				    reg & STATUS_PCIXSPD_MASK);
   2012 				sc->sc_bus_speed = 66;
   2013 				break;
   2014 			}
   2015 		} else
   2016 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2017 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2018 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2019 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2020 	}
   2021 
   2022 	/* clear interesting stat counters */
   2023 	CSR_READ(sc, WMREG_COLC);
   2024 	CSR_READ(sc, WMREG_RXERRC);
   2025 
   2026 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2027 	    || (sc->sc_type >= WM_T_ICH8))
   2028 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2029 	if (sc->sc_type >= WM_T_ICH8)
   2030 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2031 
   2032 	/* Set PHY, NVM mutex related stuff */
   2033 	switch (sc->sc_type) {
   2034 	case WM_T_82542_2_0:
   2035 	case WM_T_82542_2_1:
   2036 	case WM_T_82543:
   2037 	case WM_T_82544:
   2038 		/* Microwire */
   2039 		sc->sc_nvm_wordsize = 64;
   2040 		sc->sc_nvm_addrbits = 6;
   2041 		break;
   2042 	case WM_T_82540:
   2043 	case WM_T_82545:
   2044 	case WM_T_82545_3:
   2045 	case WM_T_82546:
   2046 	case WM_T_82546_3:
   2047 		/* Microwire */
   2048 		reg = CSR_READ(sc, WMREG_EECD);
   2049 		if (reg & EECD_EE_SIZE) {
   2050 			sc->sc_nvm_wordsize = 256;
   2051 			sc->sc_nvm_addrbits = 8;
   2052 		} else {
   2053 			sc->sc_nvm_wordsize = 64;
   2054 			sc->sc_nvm_addrbits = 6;
   2055 		}
   2056 		sc->sc_flags |= WM_F_LOCK_EECD;
   2057 		break;
   2058 	case WM_T_82541:
   2059 	case WM_T_82541_2:
   2060 	case WM_T_82547:
   2061 	case WM_T_82547_2:
   2062 		sc->sc_flags |= WM_F_LOCK_EECD;
   2063 		reg = CSR_READ(sc, WMREG_EECD);
   2064 		if (reg & EECD_EE_TYPE) {
   2065 			/* SPI */
   2066 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2067 			wm_nvm_set_addrbits_size_eecd(sc);
   2068 		} else {
   2069 			/* Microwire */
   2070 			if ((reg & EECD_EE_ABITS) != 0) {
   2071 				sc->sc_nvm_wordsize = 256;
   2072 				sc->sc_nvm_addrbits = 8;
   2073 			} else {
   2074 				sc->sc_nvm_wordsize = 64;
   2075 				sc->sc_nvm_addrbits = 6;
   2076 			}
   2077 		}
   2078 		break;
   2079 	case WM_T_82571:
   2080 	case WM_T_82572:
   2081 		/* SPI */
   2082 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2083 		wm_nvm_set_addrbits_size_eecd(sc);
   2084 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2085 		sc->phy.acquire = wm_get_swsm_semaphore;
   2086 		sc->phy.release = wm_put_swsm_semaphore;
   2087 		break;
   2088 	case WM_T_82573:
   2089 	case WM_T_82574:
   2090 	case WM_T_82583:
   2091 		if (sc->sc_type == WM_T_82573) {
   2092 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2093 			sc->phy.acquire = wm_get_swsm_semaphore;
   2094 			sc->phy.release = wm_put_swsm_semaphore;
   2095 		} else {
   2096 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2097 			/* Both PHY and NVM use the same semaphore. */
			sc->phy.acquire = wm_get_swfwhw_semaphore;
			sc->phy.release = wm_put_swfwhw_semaphore;
   2102 		}
   2103 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2104 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2105 			sc->sc_nvm_wordsize = 2048;
   2106 		} else {
   2107 			/* SPI */
   2108 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2109 			wm_nvm_set_addrbits_size_eecd(sc);
   2110 		}
   2111 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2112 		break;
   2113 	case WM_T_82575:
   2114 	case WM_T_82576:
   2115 	case WM_T_82580:
   2116 	case WM_T_I350:
   2117 	case WM_T_I354:
   2118 	case WM_T_80003:
   2119 		/* SPI */
   2120 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2121 		wm_nvm_set_addrbits_size_eecd(sc);
   2122 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2123 		    | WM_F_LOCK_SWSM;
   2124 		sc->phy.acquire = wm_get_phy_82575;
   2125 		sc->phy.release = wm_put_phy_82575;
   2126 		break;
   2127 	case WM_T_ICH8:
   2128 	case WM_T_ICH9:
   2129 	case WM_T_ICH10:
   2130 	case WM_T_PCH:
   2131 	case WM_T_PCH2:
   2132 	case WM_T_PCH_LPT:
   2133 		/* FLASH */
   2134 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2135 		sc->sc_nvm_wordsize = 2048;
   2136 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2137 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2138 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2139 			aprint_error_dev(sc->sc_dev,
   2140 			    "can't map FLASH registers\n");
   2141 			goto out;
   2142 		}
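		/*
		 * Derive the NVM layout from GFPREG.  Judging from the
		 * masks used below, the low bits hold the first sector of
		 * the flash region and the bits from 16 up hold the last
		 * sector; the sector span is converted to bytes and then
		 * to 16-bit words spread over the two banks.
		 */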
   2143 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2144 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2145 		    ICH_FLASH_SECTOR_SIZE;
   2146 		sc->sc_ich8_flash_bank_size =
   2147 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2148 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2149 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2150 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2151 		sc->sc_flashreg_offset = 0;
   2152 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2153 		sc->phy.release = wm_put_swflag_ich8lan;
   2154 		break;
   2155 	case WM_T_PCH_SPT:
   2156 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2157 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2158 		sc->sc_flasht = sc->sc_st;
   2159 		sc->sc_flashh = sc->sc_sh;
   2160 		sc->sc_ich8_flash_base = 0;
   2161 		sc->sc_nvm_wordsize =
   2162 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2163 			* NVM_SIZE_MULTIPLIER;
   2164 		/* It is size in bytes, we want words */
   2165 		sc->sc_nvm_wordsize /= 2;
   2166 		/* assume 2 banks */
   2167 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2168 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2169 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2170 		sc->phy.release = wm_put_swflag_ich8lan;
   2171 		break;
   2172 	case WM_T_I210:
   2173 	case WM_T_I211:
   2174 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2175 			wm_nvm_set_addrbits_size_eecd(sc);
   2176 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2177 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2178 		} else {
   2179 			sc->sc_nvm_wordsize = INVM_SIZE;
   2180 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2181 		}
   2182 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2183 		sc->phy.acquire = wm_get_phy_82575;
   2184 		sc->phy.release = wm_put_phy_82575;
   2185 		break;
   2186 	default:
   2187 		break;
   2188 	}
   2189 
   2190 	/* Reset the chip to a known state. */
   2191 	wm_reset(sc);
   2192 
   2193 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2194 	switch (sc->sc_type) {
   2195 	case WM_T_82571:
   2196 	case WM_T_82572:
   2197 		reg = CSR_READ(sc, WMREG_SWSM2);
   2198 		if ((reg & SWSM2_LOCK) == 0) {
   2199 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2200 			force_clear_smbi = true;
   2201 		} else
   2202 			force_clear_smbi = false;
   2203 		break;
   2204 	case WM_T_82573:
   2205 	case WM_T_82574:
   2206 	case WM_T_82583:
   2207 		force_clear_smbi = true;
   2208 		break;
   2209 	default:
   2210 		force_clear_smbi = false;
   2211 		break;
   2212 	}
   2213 	if (force_clear_smbi) {
   2214 		reg = CSR_READ(sc, WMREG_SWSM);
   2215 		if ((reg & SWSM_SMBI) != 0)
   2216 			aprint_error_dev(sc->sc_dev,
   2217 			    "Please update the Bootagent\n");
   2218 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2219 	}
   2220 
   2221 	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
   2223 	 * This allows the EEPROM type to be printed correctly in the case
   2224 	 * that no EEPROM is attached.
   2225 	 */
   2226 	/*
   2227 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2228 	 * this for later, so we can fail future reads from the EEPROM.
   2229 	 */
   2230 	if (wm_nvm_validate_checksum(sc)) {
		/*
		 * Retry the read, because some PCI-e parts fail the
		 * first check due to the link being in a sleep state.
		 */
   2235 		if (wm_nvm_validate_checksum(sc))
   2236 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2237 	}
   2238 
   2239 	/* Set device properties (macflags) */
   2240 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2241 
   2242 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2243 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2244 	else {
   2245 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2246 		    sc->sc_nvm_wordsize);
   2247 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2248 			aprint_verbose("iNVM");
   2249 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2250 			aprint_verbose("FLASH(HW)");
   2251 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2252 			aprint_verbose("FLASH");
   2253 		else {
   2254 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2255 				eetype = "SPI";
   2256 			else
   2257 				eetype = "MicroWire";
   2258 			aprint_verbose("(%d address bits) %s EEPROM",
   2259 			    sc->sc_nvm_addrbits, eetype);
   2260 		}
   2261 	}
   2262 	wm_nvm_version(sc);
   2263 	aprint_verbose("\n");
   2264 
   2265 	/* Check for I21[01] PLL workaround */
   2266 	if (sc->sc_type == WM_T_I210)
   2267 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2268 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2269 		/* NVM image release 3.25 has a workaround */
   2270 		if ((sc->sc_nvm_ver_major < 3)
   2271 		    || ((sc->sc_nvm_ver_major == 3)
   2272 			&& (sc->sc_nvm_ver_minor < 25))) {
   2273 			aprint_verbose_dev(sc->sc_dev,
   2274 			    "ROM image version %d.%d is older than 3.25\n",
   2275 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2276 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2277 		}
   2278 	}
   2279 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2280 		wm_pll_workaround_i210(sc);
   2281 
   2282 	wm_get_wakeup(sc);
   2283 
   2284 	/* Non-AMT based hardware can now take control from firmware */
   2285 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2286 		wm_get_hw_control(sc);
   2287 
   2288 	/*
   2289 	 * Read the Ethernet address from the EEPROM, if not first found
   2290 	 * in device properties.
   2291 	 */
   2292 	ea = prop_dictionary_get(dict, "mac-address");
   2293 	if (ea != NULL) {
   2294 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2295 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2296 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2297 	} else {
   2298 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2299 			aprint_error_dev(sc->sc_dev,
   2300 			    "unable to read Ethernet address\n");
   2301 			goto out;
   2302 		}
   2303 	}
   2304 
   2305 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2306 	    ether_sprintf(enaddr));
   2307 
   2308 	/*
   2309 	 * Read the config info from the EEPROM, and set up various
   2310 	 * bits in the control registers based on their contents.
   2311 	 */
   2312 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2313 	if (pn != NULL) {
   2314 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2315 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2316 	} else {
   2317 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2318 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2319 			goto out;
   2320 		}
   2321 	}
   2322 
   2323 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2324 	if (pn != NULL) {
   2325 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2326 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2327 	} else {
   2328 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2329 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2330 			goto out;
   2331 		}
   2332 	}
   2333 
   2334 	/* check for WM_F_WOL */
   2335 	switch (sc->sc_type) {
   2336 	case WM_T_82542_2_0:
   2337 	case WM_T_82542_2_1:
   2338 	case WM_T_82543:
   2339 		/* dummy? */
   2340 		eeprom_data = 0;
   2341 		apme_mask = NVM_CFG3_APME;
   2342 		break;
   2343 	case WM_T_82544:
   2344 		apme_mask = NVM_CFG2_82544_APM_EN;
   2345 		eeprom_data = cfg2;
   2346 		break;
   2347 	case WM_T_82546:
   2348 	case WM_T_82546_3:
   2349 	case WM_T_82571:
   2350 	case WM_T_82572:
   2351 	case WM_T_82573:
   2352 	case WM_T_82574:
   2353 	case WM_T_82583:
   2354 	case WM_T_80003:
   2355 	default:
   2356 		apme_mask = NVM_CFG3_APME;
   2357 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2358 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2359 		break;
   2360 	case WM_T_82575:
   2361 	case WM_T_82576:
   2362 	case WM_T_82580:
   2363 	case WM_T_I350:
   2364 	case WM_T_I354: /* XXX ok? */
   2365 	case WM_T_ICH8:
   2366 	case WM_T_ICH9:
   2367 	case WM_T_ICH10:
   2368 	case WM_T_PCH:
   2369 	case WM_T_PCH2:
   2370 	case WM_T_PCH_LPT:
   2371 	case WM_T_PCH_SPT:
   2372 		/* XXX The funcid should be checked on some devices */
   2373 		apme_mask = WUC_APME;
   2374 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2375 		break;
   2376 	}
   2377 
   2378 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2379 	if ((eeprom_data & apme_mask) != 0)
   2380 		sc->sc_flags |= WM_F_WOL;
   2381 
   2382 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2383 		/* Check NVM for autonegotiation */
   2384 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2385 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2386 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2387 		}
   2388 	}
   2389 
   2390 	/*
	 * XXX need special handling for some multi-port cards
	 * to disable a particular port.
   2393 	 */
   2394 
   2395 	if (sc->sc_type >= WM_T_82544) {
   2396 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2397 		if (pn != NULL) {
   2398 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2399 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2400 		} else {
   2401 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2402 				aprint_error_dev(sc->sc_dev,
   2403 				    "unable to read SWDPIN\n");
   2404 				goto out;
   2405 			}
   2406 		}
   2407 	}
   2408 
   2409 	if (cfg1 & NVM_CFG1_ILOS)
   2410 		sc->sc_ctrl |= CTRL_ILOS;
   2411 
   2412 	/*
   2413 	 * XXX
	 * This code isn't correct because pins 2 and 3 are located
	 * in different positions on newer chips.  Check all datasheets.
	 *
	 * Until this problem is resolved, apply it only to chips up to
	 * and including the 82580.
   2418 	 */
   2419 	if (sc->sc_type <= WM_T_82580) {
   2420 		if (sc->sc_type >= WM_T_82544) {
   2421 			sc->sc_ctrl |=
   2422 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2423 			    CTRL_SWDPIO_SHIFT;
   2424 			sc->sc_ctrl |=
   2425 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2426 			    CTRL_SWDPINS_SHIFT;
   2427 		} else {
   2428 			sc->sc_ctrl |=
   2429 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2430 			    CTRL_SWDPIO_SHIFT;
   2431 		}
   2432 	}
   2433 
   2434 	/* XXX For other than 82580? */
   2435 	if (sc->sc_type == WM_T_82580) {
   2436 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2437 		if (nvmword & __BIT(13))
   2438 			sc->sc_ctrl |= CTRL_ILOS;
   2439 	}
   2440 
   2441 #if 0
   2442 	if (sc->sc_type >= WM_T_82544) {
   2443 		if (cfg1 & NVM_CFG1_IPS0)
   2444 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2445 		if (cfg1 & NVM_CFG1_IPS1)
   2446 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2447 		sc->sc_ctrl_ext |=
   2448 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2449 		    CTRL_EXT_SWDPIO_SHIFT;
   2450 		sc->sc_ctrl_ext |=
   2451 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2452 		    CTRL_EXT_SWDPINS_SHIFT;
   2453 	} else {
   2454 		sc->sc_ctrl_ext |=
   2455 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2456 		    CTRL_EXT_SWDPIO_SHIFT;
   2457 	}
   2458 #endif
   2459 
   2460 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2461 #if 0
   2462 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2463 #endif
   2464 
   2465 	if (sc->sc_type == WM_T_PCH) {
   2466 		uint16_t val;
   2467 
   2468 		/* Save the NVM K1 bit setting */
   2469 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2470 
   2471 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2472 			sc->sc_nvm_k1_enabled = 1;
   2473 		else
   2474 			sc->sc_nvm_k1_enabled = 0;
   2475 	}
   2476 
   2477 	/*
	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2479 	 * media structures accordingly.
   2480 	 */
   2481 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2482 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2483 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2484 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2485 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2486 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2487 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2488 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2492 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2493 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2494 		switch (link_mode) {
   2495 		case CTRL_EXT_LINK_MODE_1000KX:
   2496 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2497 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2498 			break;
   2499 		case CTRL_EXT_LINK_MODE_SGMII:
   2500 			if (wm_sgmii_uses_mdio(sc)) {
   2501 				aprint_verbose_dev(sc->sc_dev,
   2502 				    "SGMII(MDIO)\n");
   2503 				sc->sc_flags |= WM_F_SGMII;
   2504 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2505 				break;
   2506 			}
   2507 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2508 			/*FALLTHROUGH*/
   2509 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2510 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2511 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2512 				if (link_mode
   2513 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2514 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2515 					sc->sc_flags |= WM_F_SGMII;
   2516 				} else {
   2517 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2518 					aprint_verbose_dev(sc->sc_dev,
   2519 					    "SERDES\n");
   2520 				}
   2521 				break;
   2522 			}
   2523 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2524 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2525 
   2526 			/* Change current link mode setting */
   2527 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2528 			switch (sc->sc_mediatype) {
   2529 			case WM_MEDIATYPE_COPPER:
   2530 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2531 				break;
   2532 			case WM_MEDIATYPE_SERDES:
   2533 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2534 				break;
   2535 			default:
   2536 				break;
   2537 			}
   2538 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2539 			break;
   2540 		case CTRL_EXT_LINK_MODE_GMII:
   2541 		default:
   2542 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2543 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2544 			break;
   2545 		}
   2546 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
   2552 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2553 
   2554 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2555 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2556 		else
   2557 			wm_tbi_mediainit(sc);
   2558 	} else if (sc->sc_type < WM_T_82543 ||
   2559 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2560 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2561 			aprint_error_dev(sc->sc_dev,
   2562 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2563 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2564 		}
   2565 		wm_tbi_mediainit(sc);
   2566 	} else {
   2567 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2568 			aprint_error_dev(sc->sc_dev,
   2569 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2570 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2571 		}
   2572 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2573 	}
   2574 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2575 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2576 
   2577 	ifp = &sc->sc_ethercom.ec_if;
   2578 	xname = device_xname(sc->sc_dev);
   2579 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2580 	ifp->if_softc = sc;
   2581 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2582 #ifdef WM_MPSAFE
   2583 	ifp->if_extflags = IFEF_START_MPSAFE;
   2584 #endif
   2585 	ifp->if_ioctl = wm_ioctl;
   2586 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2587 		ifp->if_start = wm_nq_start;
   2588 		/*
		 * When the number of CPUs is one and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
		 * other for link status changes.  In this situation,
		 * wm_nq_transmit() is disadvantageous because of the
		 * wm_select_txqueue() and pcq(9) overhead.
   2595 		 */
   2596 		if (wm_is_using_multiqueue(sc))
   2597 			ifp->if_transmit = wm_nq_transmit;
   2598 	} else {
   2599 		ifp->if_start = wm_start;
		/*
		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
		 */
   2603 		if (wm_is_using_multiqueue(sc))
   2604 			ifp->if_transmit = wm_transmit;
   2605 	}
   2606 	ifp->if_watchdog = wm_watchdog;
   2607 	ifp->if_init = wm_init;
   2608 	ifp->if_stop = wm_stop;
   2609 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2610 	IFQ_SET_READY(&ifp->if_snd);
   2611 
   2612 	/* Check for jumbo frame */
   2613 	switch (sc->sc_type) {
   2614 	case WM_T_82573:
   2615 		/* XXX limited to 9234 if ASPM is disabled */
   2616 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2617 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2618 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2619 		break;
   2620 	case WM_T_82571:
   2621 	case WM_T_82572:
   2622 	case WM_T_82574:
   2623 	case WM_T_82575:
   2624 	case WM_T_82576:
   2625 	case WM_T_82580:
   2626 	case WM_T_I350:
	case WM_T_I354: /* XXX ok? */
   2628 	case WM_T_I210:
   2629 	case WM_T_I211:
   2630 	case WM_T_80003:
   2631 	case WM_T_ICH9:
   2632 	case WM_T_ICH10:
   2633 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2634 	case WM_T_PCH_LPT:
   2635 	case WM_T_PCH_SPT:
   2636 		/* XXX limited to 9234 */
   2637 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2638 		break;
   2639 	case WM_T_PCH:
   2640 		/* XXX limited to 4096 */
   2641 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2642 		break;
   2643 	case WM_T_82542_2_0:
   2644 	case WM_T_82542_2_1:
   2645 	case WM_T_82583:
   2646 	case WM_T_ICH8:
   2647 		/* No support for jumbo frame */
   2648 		break;
   2649 	default:
   2650 		/* ETHER_MAX_LEN_JUMBO */
   2651 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2652 		break;
   2653 	}
   2654 
   2655 	/* If we're a i82543 or greater, we can support VLANs. */
   2656 	if (sc->sc_type >= WM_T_82543)
   2657 		sc->sc_ethercom.ec_capabilities |=
   2658 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2659 
   2660 	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2662 	 * on i82543 and later.
   2663 	 */
   2664 	if (sc->sc_type >= WM_T_82543) {
   2665 		ifp->if_capabilities |=
   2666 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2667 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2668 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2669 		    IFCAP_CSUM_TCPv6_Tx |
   2670 		    IFCAP_CSUM_UDPv6_Tx;
   2671 	}
   2672 
   2673 	/*
   2674 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2675 	 *
   2676 	 *	82541GI (8086:1076) ... no
   2677 	 *	82572EI (8086:10b9) ... yes
   2678 	 */
   2679 	if (sc->sc_type >= WM_T_82571) {
   2680 		ifp->if_capabilities |=
   2681 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2682 	}
   2683 
   2684 	/*
   2685 	 * If we're a i82544 or greater (except i82547), we can do
   2686 	 * TCP segmentation offload.
   2687 	 */
   2688 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2689 		ifp->if_capabilities |= IFCAP_TSOv4;
   2690 	}
   2691 
   2692 	if (sc->sc_type >= WM_T_82571) {
   2693 		ifp->if_capabilities |= IFCAP_TSOv6;
   2694 	}
   2695 
   2696 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2697 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2698 
   2699 #ifdef WM_MPSAFE
   2700 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2701 #else
   2702 	sc->sc_core_lock = NULL;
   2703 #endif
   2704 
   2705 	/* Attach the interface. */
   2706 	if_initialize(ifp);
   2707 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2708 	ether_ifattach(ifp, enaddr);
   2709 	if_register(ifp);
   2710 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2711 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2712 			  RND_FLAG_DEFAULT);
   2713 
   2714 #ifdef WM_EVENT_COUNTERS
   2715 	/* Attach event counters. */
   2716 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2717 	    NULL, xname, "linkintr");
   2718 
   2719 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2720 	    NULL, xname, "tx_xoff");
   2721 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2722 	    NULL, xname, "tx_xon");
   2723 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2724 	    NULL, xname, "rx_xoff");
   2725 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2726 	    NULL, xname, "rx_xon");
   2727 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2728 	    NULL, xname, "rx_macctl");
   2729 #endif /* WM_EVENT_COUNTERS */
   2730 
   2731 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2732 		pmf_class_network_register(self, ifp);
   2733 	else
   2734 		aprint_error_dev(self, "couldn't establish power handler\n");
   2735 
   2736 	sc->sc_flags |= WM_F_ATTACHED;
   2737  out:
   2738 	return;
   2739 }
   2740 
   2741 /* The detach function (ca_detach) */
   2742 static int
   2743 wm_detach(device_t self, int flags __unused)
   2744 {
   2745 	struct wm_softc *sc = device_private(self);
   2746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2747 	int i;
   2748 
   2749 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2750 		return 0;
   2751 
   2752 	/* Stop the interface. Callouts are stopped in it. */
   2753 	wm_stop(ifp, 1);
   2754 
   2755 	pmf_device_deregister(self);
   2756 
   2757 #ifdef WM_EVENT_COUNTERS
   2758 	evcnt_detach(&sc->sc_ev_linkintr);
   2759 
   2760 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2761 	evcnt_detach(&sc->sc_ev_tx_xon);
   2762 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2763 	evcnt_detach(&sc->sc_ev_rx_xon);
   2764 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2765 #endif /* WM_EVENT_COUNTERS */
   2766 
   2767 	/* Tell the firmware about the release */
   2768 	WM_CORE_LOCK(sc);
   2769 	wm_release_manageability(sc);
   2770 	wm_release_hw_control(sc);
   2771 	wm_enable_wakeup(sc);
   2772 	WM_CORE_UNLOCK(sc);
   2773 
   2774 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2775 
   2776 	/* Delete all remaining media. */
   2777 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2778 
   2779 	ether_ifdetach(ifp);
   2780 	if_detach(ifp);
   2781 	if_percpuq_destroy(sc->sc_ipq);
   2782 
   2783 	/* Unload RX dmamaps and free mbufs */
   2784 	for (i = 0; i < sc->sc_nqueues; i++) {
   2785 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2786 		mutex_enter(rxq->rxq_lock);
   2787 		wm_rxdrain(rxq);
   2788 		mutex_exit(rxq->rxq_lock);
   2789 	}
   2790 	/* Must unlock here */
   2791 
   2792 	/* Disestablish the interrupt handler */
   2793 	for (i = 0; i < sc->sc_nintrs; i++) {
   2794 		if (sc->sc_ihs[i] != NULL) {
   2795 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2796 			sc->sc_ihs[i] = NULL;
   2797 		}
   2798 	}
   2799 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2800 
   2801 	wm_free_txrx_queues(sc);
   2802 
   2803 	/* Unmap the registers */
   2804 	if (sc->sc_ss) {
   2805 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2806 		sc->sc_ss = 0;
   2807 	}
   2808 	if (sc->sc_ios) {
   2809 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2810 		sc->sc_ios = 0;
   2811 	}
   2812 	if (sc->sc_flashs) {
   2813 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2814 		sc->sc_flashs = 0;
   2815 	}
   2816 
   2817 	if (sc->sc_core_lock)
   2818 		mutex_obj_free(sc->sc_core_lock);
   2819 	if (sc->sc_ich_phymtx)
   2820 		mutex_obj_free(sc->sc_ich_phymtx);
   2821 	if (sc->sc_ich_nvmmtx)
   2822 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2823 
   2824 	return 0;
   2825 }
   2826 
   2827 static bool
   2828 wm_suspend(device_t self, const pmf_qual_t *qual)
   2829 {
   2830 	struct wm_softc *sc = device_private(self);
   2831 
   2832 	wm_release_manageability(sc);
   2833 	wm_release_hw_control(sc);
   2834 	wm_enable_wakeup(sc);
   2835 
   2836 	return true;
   2837 }
   2838 
   2839 static bool
   2840 wm_resume(device_t self, const pmf_qual_t *qual)
   2841 {
   2842 	struct wm_softc *sc = device_private(self);
   2843 
   2844 	wm_init_manageability(sc);
   2845 
   2846 	return true;
   2847 }
   2848 
   2849 /*
   2850  * wm_watchdog:		[ifnet interface function]
   2851  *
   2852  *	Watchdog timer handler.
   2853  */
   2854 static void
   2855 wm_watchdog(struct ifnet *ifp)
   2856 {
   2857 	int qid;
   2858 	struct wm_softc *sc = ifp->if_softc;
   2859 
   2860 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2861 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2862 
   2863 		wm_watchdog_txq(ifp, txq);
   2864 	}
   2865 
   2866 	/* Reset the interface. */
   2867 	(void) wm_init(ifp);
   2868 
   2869 	/*
	 * Some upper layer processing, e.g. ALTQ or single-CPU systems,
	 * may still call ifp->if_start().
   2872 	 */
   2873 	/* Try to get more packets going. */
   2874 	ifp->if_start(ifp);
   2875 }
   2876 
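/*
 * wm_watchdog_txq:
 *
 *	Per-queue watchdog body: sweep up completed transmit jobs first,
 *	and log a device timeout only if the ring still has outstanding
 *	descriptors afterwards.
 */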
   2877 static void
   2878 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2879 {
   2880 	struct wm_softc *sc = ifp->if_softc;
   2881 
   2882 	/*
   2883 	 * Since we're using delayed interrupts, sweep up
   2884 	 * before we report an error.
   2885 	 */
   2886 	mutex_enter(txq->txq_lock);
   2887 	wm_txeof(sc, txq);
   2888 	mutex_exit(txq->txq_lock);
   2889 
   2890 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2891 #ifdef WM_DEBUG
   2892 		int i, j;
   2893 		struct wm_txsoft *txs;
   2894 #endif
   2895 		log(LOG_ERR,
   2896 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2897 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2898 		    txq->txq_next);
   2899 		ifp->if_oerrors++;
   2900 #ifdef WM_DEBUG
   2901 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2902 		    i = WM_NEXTTXS(txq, i)) {
   2903 		    txs = &txq->txq_soft[i];
   2904 		    printf("txs %d tx %d -> %d\n",
   2905 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2906 		    for (j = txs->txs_firstdesc; ;
   2907 			j = WM_NEXTTX(txq, j)) {
   2908 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2909 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2910 			printf("\t %#08x%08x\n",
   2911 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2912 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2913 			if (j == txs->txs_lastdesc)
   2914 				break;
   2915 			}
   2916 		}
   2917 #endif
   2918 	}
   2919 }
   2920 
   2921 /*
   2922  * wm_tick:
   2923  *
   2924  *	One second timer, used to check link status, sweep up
   2925  *	completed transmit jobs, etc.
   2926  */
   2927 static void
   2928 wm_tick(void *arg)
   2929 {
   2930 	struct wm_softc *sc = arg;
   2931 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2932 #ifndef WM_MPSAFE
   2933 	int s = splnet();
   2934 #endif
   2935 
   2936 	WM_CORE_LOCK(sc);
   2937 
   2938 	if (sc->sc_core_stopping)
   2939 		goto out;
   2940 
   2941 	if (sc->sc_type >= WM_T_82542_2_1) {
   2942 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2943 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2944 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2945 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2946 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2947 	}
   2948 
   2949 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2950 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2951 	    + CSR_READ(sc, WMREG_CRCERRS)
   2952 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2953 	    + CSR_READ(sc, WMREG_SYMERRC)
   2954 	    + CSR_READ(sc, WMREG_RXERRC)
   2955 	    + CSR_READ(sc, WMREG_SEC)
   2956 	    + CSR_READ(sc, WMREG_CEXTERR)
   2957 	    + CSR_READ(sc, WMREG_RLEC);
   2958 	/*
   2959 	 * WMREG_RNBC is incremented when there is no available buffers in host
   2960 	 * memory. It does not mean the number of dropped packet. Because
   2961 	 * ethernet controller can receive packets in such case if there is
   2962 	 * space in phy's FIFO.
   2963 	 *
   2964 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
   2965 	 * own EVCNT instead of if_iqdrops.
   2966 	 */
   2967 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2968 
   2969 	if (sc->sc_flags & WM_F_HAS_MII)
   2970 		mii_tick(&sc->sc_mii);
   2971 	else if ((sc->sc_type >= WM_T_82575)
   2972 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2973 		wm_serdes_tick(sc);
   2974 	else
   2975 		wm_tbi_tick(sc);
   2976 
   2977 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2978 out:
   2979 	WM_CORE_UNLOCK(sc);
   2980 #ifndef WM_MPSAFE
   2981 	splx(s);
   2982 #endif
   2983 }
   2984 
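/*
 * wm_ifflags_cb:
 *
 *	Callback for interface flag changes. Returns ENETRESET when the
 *	change requires a full reinitialization; otherwise it updates the
 *	receive filter and VLAN settings in place.
 */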
   2985 static int
   2986 wm_ifflags_cb(struct ethercom *ec)
   2987 {
   2988 	struct ifnet *ifp = &ec->ec_if;
   2989 	struct wm_softc *sc = ifp->if_softc;
   2990 	int rc = 0;
   2991 
   2992 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2993 		device_xname(sc->sc_dev), __func__));
   2994 
   2995 	WM_CORE_LOCK(sc);
   2996 
   2997 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2998 	sc->sc_if_flags = ifp->if_flags;
   2999 
   3000 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3001 		rc = ENETRESET;
   3002 		goto out;
   3003 	}
   3004 
   3005 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3006 		wm_set_filter(sc);
   3007 
   3008 	wm_set_vlan(sc);
   3009 
   3010 out:
   3011 	WM_CORE_UNLOCK(sc);
   3012 
   3013 	return rc;
   3014 }
   3015 
   3016 /*
   3017  * wm_ioctl:		[ifnet interface function]
   3018  *
   3019  *	Handle control requests from the operator.
   3020  */
   3021 static int
   3022 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3023 {
   3024 	struct wm_softc *sc = ifp->if_softc;
   3025 	struct ifreq *ifr = (struct ifreq *) data;
   3026 	struct ifaddr *ifa = (struct ifaddr *)data;
   3027 	struct sockaddr_dl *sdl;
   3028 	int s, error;
   3029 
   3030 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3031 		device_xname(sc->sc_dev), __func__));
   3032 
   3033 #ifndef WM_MPSAFE
   3034 	s = splnet();
   3035 #endif
   3036 	switch (cmd) {
   3037 	case SIOCSIFMEDIA:
   3038 	case SIOCGIFMEDIA:
   3039 		WM_CORE_LOCK(sc);
   3040 		/* Flow control requires full-duplex mode. */
   3041 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3042 		    (ifr->ifr_media & IFM_FDX) == 0)
   3043 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3044 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3045 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3046 				/* We can do both TXPAUSE and RXPAUSE. */
   3047 				ifr->ifr_media |=
   3048 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3049 			}
   3050 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3051 		}
   3052 		WM_CORE_UNLOCK(sc);
   3053 #ifdef WM_MPSAFE
   3054 		s = splnet();
   3055 #endif
   3056 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3057 #ifdef WM_MPSAFE
   3058 		splx(s);
   3059 #endif
   3060 		break;
   3061 	case SIOCINITIFADDR:
   3062 		WM_CORE_LOCK(sc);
   3063 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3064 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3065 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3066 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
			/* The unicast address is the first filter entry */
   3068 			wm_set_filter(sc);
   3069 			error = 0;
   3070 			WM_CORE_UNLOCK(sc);
   3071 			break;
   3072 		}
   3073 		WM_CORE_UNLOCK(sc);
   3074 		/*FALLTHROUGH*/
   3075 	default:
   3076 #ifdef WM_MPSAFE
   3077 		s = splnet();
   3078 #endif
   3079 		/* It may call wm_start, so unlock here */
   3080 		error = ether_ioctl(ifp, cmd, data);
   3081 #ifdef WM_MPSAFE
   3082 		splx(s);
   3083 #endif
   3084 		if (error != ENETRESET)
   3085 			break;
   3086 
   3087 		error = 0;
   3088 
   3089 		if (cmd == SIOCSIFCAP) {
   3090 			error = (*ifp->if_init)(ifp);
   3091 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3092 			;
   3093 		else if (ifp->if_flags & IFF_RUNNING) {
   3094 			/*
   3095 			 * Multicast list has changed; set the hardware filter
   3096 			 * accordingly.
   3097 			 */
   3098 			WM_CORE_LOCK(sc);
   3099 			wm_set_filter(sc);
   3100 			WM_CORE_UNLOCK(sc);
   3101 		}
   3102 		break;
   3103 	}
   3104 
   3105 #ifndef WM_MPSAFE
   3106 	splx(s);
   3107 #endif
   3108 	return error;
   3109 }
   3110 
   3111 /* MAC address related */
   3112 
   3113 /*
 * Get the NVM offset of the alternative MAC address and return it.
 * On error, return 0 (the standard MAC address offset).
   3116  */
   3117 static uint16_t
   3118 wm_check_alt_mac_addr(struct wm_softc *sc)
   3119 {
   3120 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3121 	uint16_t offset = NVM_OFF_MACADDR;
   3122 
   3123 	/* Try to read alternative MAC address pointer */
   3124 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3125 		return 0;
   3126 
	/* Check whether the pointer is valid. */
   3128 	if ((offset == 0x0000) || (offset == 0xffff))
   3129 		return 0;
   3130 
   3131 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3132 	/*
	 * Check whether the alternative MAC address is valid. Some cards
	 * have a non-0xffff pointer but don't actually use an alternative
	 * MAC address.
	 *
	 * A valid address must not have the broadcast/multicast bit set.
   3138 	 */
   3139 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3140 		if (((myea[0] & 0xff) & 0x01) == 0)
   3141 			return offset; /* Found */
   3142 
   3143 	/* Not found */
   3144 	return 0;
   3145 }
   3146 
   3147 static int
   3148 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3149 {
   3150 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3151 	uint16_t offset = NVM_OFF_MACADDR;
   3152 	int do_invert = 0;
   3153 
   3154 	switch (sc->sc_type) {
   3155 	case WM_T_82580:
   3156 	case WM_T_I350:
   3157 	case WM_T_I354:
   3158 		/* EEPROM Top Level Partitioning */
   3159 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3160 		break;
   3161 	case WM_T_82571:
   3162 	case WM_T_82575:
   3163 	case WM_T_82576:
   3164 	case WM_T_80003:
   3165 	case WM_T_I210:
   3166 	case WM_T_I211:
   3167 		offset = wm_check_alt_mac_addr(sc);
   3168 		if (offset == 0)
   3169 			if ((sc->sc_funcid & 0x01) == 1)
   3170 				do_invert = 1;
   3171 		break;
   3172 	default:
   3173 		if ((sc->sc_funcid & 0x01) == 1)
   3174 			do_invert = 1;
   3175 		break;
   3176 	}
   3177 
   3178 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3179 		goto bad;
   3180 
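	/* The NVM stores the address as three little-endian 16-bit words. */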
   3181 	enaddr[0] = myea[0] & 0xff;
   3182 	enaddr[1] = myea[0] >> 8;
   3183 	enaddr[2] = myea[1] & 0xff;
   3184 	enaddr[3] = myea[1] >> 8;
   3185 	enaddr[4] = myea[2] & 0xff;
   3186 	enaddr[5] = myea[2] >> 8;
   3187 
   3188 	/*
   3189 	 * Toggle the LSB of the MAC address on the second port
   3190 	 * of some dual port cards.
   3191 	 */
   3192 	if (do_invert != 0)
   3193 		enaddr[5] ^= 1;
   3194 
   3195 	return 0;
   3196 
   3197  bad:
   3198 	return -1;
   3199 }
   3200 
   3201 /*
   3202  * wm_set_ral:
   3203  *
   3204  *	Set an entery in the receive address list.
   3205  */
   3206 static void
   3207 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3208 {
   3209 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3210 	uint32_t wlock_mac;
   3211 	int rv;
   3212 
   3213 	if (enaddr != NULL) {
   3214 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3215 		    (enaddr[3] << 24);
   3216 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3217 		ral_hi |= RAL_AV;
   3218 	} else {
   3219 		ral_lo = 0;
   3220 		ral_hi = 0;
   3221 	}
   3222 
   3223 	switch (sc->sc_type) {
   3224 	case WM_T_82542_2_0:
   3225 	case WM_T_82542_2_1:
   3226 	case WM_T_82543:
   3227 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3228 		CSR_WRITE_FLUSH(sc);
   3229 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3230 		CSR_WRITE_FLUSH(sc);
   3231 		break;
   3232 	case WM_T_PCH2:
   3233 	case WM_T_PCH_LPT:
   3234 	case WM_T_PCH_SPT:
   3235 		if (idx == 0) {
   3236 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3237 			CSR_WRITE_FLUSH(sc);
   3238 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3239 			CSR_WRITE_FLUSH(sc);
   3240 			return;
   3241 		}
   3242 		if (sc->sc_type != WM_T_PCH2) {
   3243 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3244 			    FWSM_WLOCK_MAC);
   3245 			addrl = WMREG_SHRAL(idx - 1);
   3246 			addrh = WMREG_SHRAH(idx - 1);
   3247 		} else {
   3248 			wlock_mac = 0;
   3249 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3250 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3251 		}
   3252 
   3253 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3254 			rv = wm_get_swflag_ich8lan(sc);
   3255 			if (rv != 0)
   3256 				return;
   3257 			CSR_WRITE(sc, addrl, ral_lo);
   3258 			CSR_WRITE_FLUSH(sc);
   3259 			CSR_WRITE(sc, addrh, ral_hi);
   3260 			CSR_WRITE_FLUSH(sc);
   3261 			wm_put_swflag_ich8lan(sc);
   3262 		}
   3263 
   3264 		break;
   3265 	default:
   3266 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3267 		CSR_WRITE_FLUSH(sc);
   3268 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3269 		CSR_WRITE_FLUSH(sc);
   3270 		break;
   3271 	}
   3272 }
   3273 
   3274 /*
   3275  * wm_mchash:
   3276  *
   3277  *	Compute the hash of the multicast address for the 4096-bit
   3278  *	multicast filter.
   3279  */
   3280 static uint32_t
   3281 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3282 {
   3283 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3284 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3285 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3286 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3287 	uint32_t hash;
   3288 
   3289 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3290 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3291 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3292 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3293 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3294 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3295 		return (hash & 0x3ff);
   3296 	}
   3297 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3298 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3299 
   3300 	return (hash & 0xfff);
   3301 }
   3302 
   3303 /*
   3304  * wm_set_filter:
   3305  *
   3306  *	Set up the receive filter.
   3307  */
   3308 static void
   3309 wm_set_filter(struct wm_softc *sc)
   3310 {
   3311 	struct ethercom *ec = &sc->sc_ethercom;
   3312 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3313 	struct ether_multi *enm;
   3314 	struct ether_multistep step;
   3315 	bus_addr_t mta_reg;
   3316 	uint32_t hash, reg, bit;
   3317 	int i, size, ralmax;
   3318 
   3319 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3320 		device_xname(sc->sc_dev), __func__));
   3321 
   3322 	if (sc->sc_type >= WM_T_82544)
   3323 		mta_reg = WMREG_CORDOVA_MTA;
   3324 	else
   3325 		mta_reg = WMREG_MTA;
   3326 
   3327 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3328 
   3329 	if (ifp->if_flags & IFF_BROADCAST)
   3330 		sc->sc_rctl |= RCTL_BAM;
   3331 	if (ifp->if_flags & IFF_PROMISC) {
   3332 		sc->sc_rctl |= RCTL_UPE;
   3333 		goto allmulti;
   3334 	}
   3335 
   3336 	/*
   3337 	 * Set the station address in the first RAL slot, and
   3338 	 * clear the remaining slots.
   3339 	 */
   3340 	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3342 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3343 	    || (sc->sc_type == WM_T_PCH))
   3344 		size = WM_RAL_TABSIZE_ICH8;
   3345 	else if (sc->sc_type == WM_T_PCH2)
   3346 		size = WM_RAL_TABSIZE_PCH2;
	else if ((sc->sc_type == WM_T_PCH_LPT)
	    || (sc->sc_type == WM_T_PCH_SPT))
   3348 		size = WM_RAL_TABSIZE_PCH_LPT;
   3349 	else if (sc->sc_type == WM_T_82575)
   3350 		size = WM_RAL_TABSIZE_82575;
   3351 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3352 		size = WM_RAL_TABSIZE_82576;
   3353 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3354 		size = WM_RAL_TABSIZE_I350;
   3355 	else
   3356 		size = WM_RAL_TABSIZE;
   3357 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3358 
   3359 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3360 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3361 		switch (i) {
   3362 		case 0:
   3363 			/* We can use all entries */
   3364 			ralmax = size;
   3365 			break;
   3366 		case 1:
   3367 			/* Only RAR[0] */
   3368 			ralmax = 1;
   3369 			break;
   3370 		default:
   3371 			/* available SHRA + RAR[0] */
   3372 			ralmax = i + 1;
   3373 		}
   3374 	} else
   3375 		ralmax = size;
   3376 	for (i = 1; i < size; i++) {
   3377 		if (i < ralmax)
   3378 			wm_set_ral(sc, NULL, i);
   3379 	}
   3380 
   3381 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3382 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3383 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3384 	    || (sc->sc_type == WM_T_PCH_SPT))
   3385 		size = WM_ICH8_MC_TABSIZE;
   3386 	else
   3387 		size = WM_MC_TABSIZE;
   3388 	/* Clear out the multicast table. */
   3389 	for (i = 0; i < size; i++) {
   3390 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3391 		CSR_WRITE_FLUSH(sc);
   3392 	}
   3393 
   3394 	ETHER_LOCK(ec);
   3395 	ETHER_FIRST_MULTI(step, ec, enm);
   3396 	while (enm != NULL) {
   3397 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3398 			ETHER_UNLOCK(ec);
   3399 			/*
   3400 			 * We must listen to a range of multicast addresses.
   3401 			 * For now, just accept all multicasts, rather than
   3402 			 * trying to set only those filter bits needed to match
   3403 			 * the range.  (At this time, the only use of address
   3404 			 * ranges is for IP multicast routing, for which the
   3405 			 * range is big enough to require all bits set.)
   3406 			 */
   3407 			goto allmulti;
   3408 		}
   3409 
   3410 		hash = wm_mchash(sc, enm->enm_addrlo);
   3411 
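		/*
		 * The upper bits of the hash select one of the 32-bit MTA
		 * registers (128 on most chips, 32 on ICH/PCH variants);
		 * the low five bits select the bit within that register.
		 */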
   3412 		reg = (hash >> 5);
   3413 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3414 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3415 		    || (sc->sc_type == WM_T_PCH2)
   3416 		    || (sc->sc_type == WM_T_PCH_LPT)
   3417 		    || (sc->sc_type == WM_T_PCH_SPT))
   3418 			reg &= 0x1f;
   3419 		else
   3420 			reg &= 0x7f;
   3421 		bit = hash & 0x1f;
   3422 
   3423 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3424 		hash |= 1U << bit;
   3425 
   3426 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3427 			/*
   3428 			 * 82544 Errata 9: Certain register cannot be written
   3429 			 * with particular alignments in PCI-X bus operation
   3430 			 * (FCAH, MTA and VFTA).
   3431 			 */
   3432 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3433 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3434 			CSR_WRITE_FLUSH(sc);
   3435 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3436 			CSR_WRITE_FLUSH(sc);
   3437 		} else {
   3438 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3439 			CSR_WRITE_FLUSH(sc);
   3440 		}
   3441 
   3442 		ETHER_NEXT_MULTI(step, enm);
   3443 	}
   3444 	ETHER_UNLOCK(ec);
   3445 
   3446 	ifp->if_flags &= ~IFF_ALLMULTI;
   3447 	goto setit;
   3448 
   3449  allmulti:
   3450 	ifp->if_flags |= IFF_ALLMULTI;
   3451 	sc->sc_rctl |= RCTL_MPE;
   3452 
   3453  setit:
   3454 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3455 }
   3456 
   3457 /* Reset and init related */
   3458 
   3459 static void
   3460 wm_set_vlan(struct wm_softc *sc)
   3461 {
   3462 
   3463 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3464 		device_xname(sc->sc_dev), __func__));
   3465 
   3466 	/* Deal with VLAN enables. */
   3467 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3468 		sc->sc_ctrl |= CTRL_VME;
   3469 	else
   3470 		sc->sc_ctrl &= ~CTRL_VME;
   3471 
   3472 	/* Write the control registers. */
   3473 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3474 }
   3475 
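/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is still at its default of 0, set
 *	it to 10ms via GCR on devices without the v2 capability, or to
 *	16ms via DCSR2 otherwise. Completion timeout resend is disabled
 *	in all cases.
 */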
   3476 static void
   3477 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3478 {
   3479 	uint32_t gcr;
   3480 	pcireg_t ctrl2;
   3481 
   3482 	gcr = CSR_READ(sc, WMREG_GCR);
   3483 
	/* Only take action if the timeout value is at its default of 0 */
   3485 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3486 		goto out;
   3487 
   3488 	if ((gcr & GCR_CAP_VER2) == 0) {
   3489 		gcr |= GCR_CMPL_TMOUT_10MS;
   3490 		goto out;
   3491 	}
   3492 
   3493 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3494 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3495 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3496 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3497 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3498 
   3499 out:
   3500 	/* Disable completion timeout resend */
   3501 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3502 
   3503 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3504 }
   3505 
   3506 void
   3507 wm_get_auto_rd_done(struct wm_softc *sc)
   3508 {
   3509 	int i;
   3510 
   3511 	/* wait for eeprom to reload */
   3512 	switch (sc->sc_type) {
   3513 	case WM_T_82571:
   3514 	case WM_T_82572:
   3515 	case WM_T_82573:
   3516 	case WM_T_82574:
   3517 	case WM_T_82583:
   3518 	case WM_T_82575:
   3519 	case WM_T_82576:
   3520 	case WM_T_82580:
   3521 	case WM_T_I350:
   3522 	case WM_T_I354:
   3523 	case WM_T_I210:
   3524 	case WM_T_I211:
   3525 	case WM_T_80003:
   3526 	case WM_T_ICH8:
   3527 	case WM_T_ICH9:
   3528 		for (i = 0; i < 10; i++) {
   3529 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3530 				break;
   3531 			delay(1000);
   3532 		}
   3533 		if (i == 10) {
   3534 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3535 			    "complete\n", device_xname(sc->sc_dev));
   3536 		}
   3537 		break;
   3538 	default:
   3539 		break;
   3540 	}
   3541 }
   3542 
   3543 void
   3544 wm_lan_init_done(struct wm_softc *sc)
   3545 {
   3546 	uint32_t reg = 0;
   3547 	int i;
   3548 
   3549 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3550 		device_xname(sc->sc_dev), __func__));
   3551 
   3552 	/* Wait for eeprom to reload */
   3553 	switch (sc->sc_type) {
   3554 	case WM_T_ICH10:
   3555 	case WM_T_PCH:
   3556 	case WM_T_PCH2:
   3557 	case WM_T_PCH_LPT:
   3558 	case WM_T_PCH_SPT:
   3559 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3560 			reg = CSR_READ(sc, WMREG_STATUS);
   3561 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3562 				break;
   3563 			delay(100);
   3564 		}
   3565 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3566 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3567 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3568 		}
   3569 		break;
   3570 	default:
   3571 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3572 		    __func__);
   3573 		break;
   3574 	}
   3575 
   3576 	reg &= ~STATUS_LAN_INIT_DONE;
   3577 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3578 }
   3579 
   3580 void
   3581 wm_get_cfg_done(struct wm_softc *sc)
   3582 {
   3583 	int mask;
   3584 	uint32_t reg;
   3585 	int i;
   3586 
   3587 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3588 		device_xname(sc->sc_dev), __func__));
   3589 
   3590 	/* Wait for eeprom to reload */
   3591 	switch (sc->sc_type) {
   3592 	case WM_T_82542_2_0:
   3593 	case WM_T_82542_2_1:
   3594 		/* null */
   3595 		break;
   3596 	case WM_T_82543:
   3597 	case WM_T_82544:
   3598 	case WM_T_82540:
   3599 	case WM_T_82545:
   3600 	case WM_T_82545_3:
   3601 	case WM_T_82546:
   3602 	case WM_T_82546_3:
   3603 	case WM_T_82541:
   3604 	case WM_T_82541_2:
   3605 	case WM_T_82547:
   3606 	case WM_T_82547_2:
   3607 	case WM_T_82573:
   3608 	case WM_T_82574:
   3609 	case WM_T_82583:
   3610 		/* generic */
   3611 		delay(10*1000);
   3612 		break;
   3613 	case WM_T_80003:
   3614 	case WM_T_82571:
   3615 	case WM_T_82572:
   3616 	case WM_T_82575:
   3617 	case WM_T_82576:
   3618 	case WM_T_82580:
   3619 	case WM_T_I350:
   3620 	case WM_T_I354:
   3621 	case WM_T_I210:
   3622 	case WM_T_I211:
   3623 		if (sc->sc_type == WM_T_82571) {
   3624 			/* Only 82571 shares port 0 */
   3625 			mask = EEMNGCTL_CFGDONE_0;
   3626 		} else
   3627 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3628 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3629 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3630 				break;
   3631 			delay(1000);
   3632 		}
   3633 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3634 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3635 				device_xname(sc->sc_dev), __func__));
   3636 		}
   3637 		break;
   3638 	case WM_T_ICH8:
   3639 	case WM_T_ICH9:
   3640 	case WM_T_ICH10:
   3641 	case WM_T_PCH:
   3642 	case WM_T_PCH2:
   3643 	case WM_T_PCH_LPT:
   3644 	case WM_T_PCH_SPT:
   3645 		delay(10*1000);
   3646 		if (sc->sc_type >= WM_T_ICH10)
   3647 			wm_lan_init_done(sc);
   3648 		else
   3649 			wm_get_auto_rd_done(sc);
   3650 
   3651 		reg = CSR_READ(sc, WMREG_STATUS);
   3652 		if ((reg & STATUS_PHYRA) != 0)
   3653 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3654 		break;
   3655 	default:
   3656 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3657 		    __func__);
   3658 		break;
   3659 	}
   3660 }
   3661 
   3662 void
   3663 wm_phy_post_reset(struct wm_softc *sc)
   3664 {
   3665 	uint32_t reg;
   3666 
   3667 	/* This function is only for ICH8 and newer. */
   3668 	if (sc->sc_type < WM_T_ICH8)
   3669 		return;
   3670 
   3671 	if (wm_phy_resetisblocked(sc)) {
   3672 		/* XXX */
   3673 		device_printf(sc->sc_dev, " PHY is blocked\n");
   3674 		return;
   3675 	}
   3676 
   3677 	/* Allow time for h/w to get to quiescent state after reset */
   3678 	delay(10*1000);
   3679 
   3680 	/* Perform any necessary post-reset workarounds */
   3681 	if (sc->sc_type == WM_T_PCH)
   3682 		wm_hv_phy_workaround_ich8lan(sc);
   3683 	if (sc->sc_type == WM_T_PCH2)
   3684 		wm_lv_phy_workaround_ich8lan(sc);
   3685 
   3686 	/* Clear the host wakeup bit after lcd reset */
   3687 	if (sc->sc_type >= WM_T_PCH) {
   3688 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3689 		    BM_PORT_GEN_CFG);
   3690 		reg &= ~BM_WUC_HOST_WU_BIT;
   3691 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3692 		    BM_PORT_GEN_CFG, reg);
   3693 	}
   3694 
   3695 	/*
	 * XXX Configure the LCD with the extended configuration region
   3697 	 * in NVM
   3698 	 */
   3699 
   3700 	/* Configure the LCD with the OEM bits in NVM */
   3701 }
   3702 
   3703 /* Init hardware bits */
   3704 void
   3705 wm_initialize_hardware_bits(struct wm_softc *sc)
   3706 {
   3707 	uint32_t tarc0, tarc1, reg;
   3708 
   3709 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3710 		device_xname(sc->sc_dev), __func__));
   3711 
   3712 	/* For 82571 variant, 80003 and ICHs */
   3713 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3714 	    || (sc->sc_type >= WM_T_80003)) {
   3715 
   3716 		/* Transmit Descriptor Control 0 */
   3717 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3718 		reg |= TXDCTL_COUNT_DESC;
   3719 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3720 
   3721 		/* Transmit Descriptor Control 1 */
   3722 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3723 		reg |= TXDCTL_COUNT_DESC;
   3724 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3725 
   3726 		/* TARC0 */
   3727 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3728 		switch (sc->sc_type) {
   3729 		case WM_T_82571:
   3730 		case WM_T_82572:
   3731 		case WM_T_82573:
   3732 		case WM_T_82574:
   3733 		case WM_T_82583:
   3734 		case WM_T_80003:
   3735 			/* Clear bits 30..27 */
   3736 			tarc0 &= ~__BITS(30, 27);
   3737 			break;
   3738 		default:
   3739 			break;
   3740 		}
   3741 
   3742 		switch (sc->sc_type) {
   3743 		case WM_T_82571:
   3744 		case WM_T_82572:
   3745 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3746 
   3747 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3748 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3749 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3750 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3752 
   3753 			/* TARC1 bit 28 */
   3754 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3755 				tarc1 &= ~__BIT(28);
   3756 			else
   3757 				tarc1 |= __BIT(28);
   3758 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3759 
   3760 			/*
   3761 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   3763 			 */
   3764 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3765 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3766 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3767 			break;
   3768 		case WM_T_82573:
   3769 		case WM_T_82574:
   3770 		case WM_T_82583:
   3771 			if ((sc->sc_type == WM_T_82574)
   3772 			    || (sc->sc_type == WM_T_82583))
   3773 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3774 
   3775 			/* Extended Device Control */
   3776 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3777 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3778 			reg |= __BIT(22);	/* Set bit 22 */
   3779 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3780 
   3781 			/* Device Control */
   3782 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3783 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3784 
   3785 			/* PCIe Control Register */
   3786 			/*
   3787 			 * 82573 Errata (unknown).
   3788 			 *
   3789 			 * 82574 Errata 25 and 82583 Errata 12
   3790 			 * "Dropped Rx Packets":
			 *   NVM image version 2.1.4 and newer do not have
			 *   this bug.
   3792 			 */
   3793 			reg = CSR_READ(sc, WMREG_GCR);
   3794 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3795 			CSR_WRITE(sc, WMREG_GCR, reg);
   3796 
   3797 			if ((sc->sc_type == WM_T_82574)
   3798 			    || (sc->sc_type == WM_T_82583)) {
   3799 				/*
   3800 				 * Document says this bit must be set for
   3801 				 * proper operation.
   3802 				 */
   3803 				reg = CSR_READ(sc, WMREG_GCR);
   3804 				reg |= __BIT(22);
   3805 				CSR_WRITE(sc, WMREG_GCR, reg);
   3806 
   3807 				/*
				 * Apply a workaround for a hardware erratum
				 * documented in the errata docs. It fixes an
				 * issue where error-prone or unreliable PCIe
				 * completions occur, particularly with ASPM
				 * enabled. Without the fix, the issue can
				 * cause Tx timeouts.
   3814 				 */
   3815 				reg = CSR_READ(sc, WMREG_GCR2);
   3816 				reg |= __BIT(0);
   3817 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3818 			}
   3819 			break;
   3820 		case WM_T_80003:
   3821 			/* TARC0 */
   3822 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3823 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3825 
   3826 			/* TARC1 bit 28 */
   3827 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3828 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3829 				tarc1 &= ~__BIT(28);
   3830 			else
   3831 				tarc1 |= __BIT(28);
   3832 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3833 			break;
   3834 		case WM_T_ICH8:
   3835 		case WM_T_ICH9:
   3836 		case WM_T_ICH10:
   3837 		case WM_T_PCH:
   3838 		case WM_T_PCH2:
   3839 		case WM_T_PCH_LPT:
   3840 		case WM_T_PCH_SPT:
   3841 			/* TARC0 */
   3842 			if ((sc->sc_type == WM_T_ICH8)
   3843 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3844 				/* Set TARC0 bits 29 and 28 */
   3845 				tarc0 |= __BITS(29, 28);
   3846 			}
   3847 			/* Set TARC0 bits 23,24,26,27 */
   3848 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3849 
   3850 			/* CTRL_EXT */
   3851 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3852 			reg |= __BIT(22);	/* Set bit 22 */
   3853 			/*
   3854 			 * Enable PHY low-power state when MAC is at D3
   3855 			 * w/o WoL
   3856 			 */
   3857 			if (sc->sc_type >= WM_T_PCH)
   3858 				reg |= CTRL_EXT_PHYPDEN;
   3859 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3860 
   3861 			/* TARC1 */
   3862 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3863 			/* bit 28 */
   3864 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3865 				tarc1 &= ~__BIT(28);
   3866 			else
   3867 				tarc1 |= __BIT(28);
   3868 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3869 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3870 
   3871 			/* Device Status */
   3872 			if (sc->sc_type == WM_T_ICH8) {
   3873 				reg = CSR_READ(sc, WMREG_STATUS);
   3874 				reg &= ~__BIT(31);
   3875 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3876 
   3877 			}
   3878 
   3879 			/* IOSFPC */
   3880 			if (sc->sc_type == WM_T_PCH_SPT) {
   3881 				reg = CSR_READ(sc, WMREG_IOSFPC);
				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3883 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3884 			}
   3885 			/*
			 * To work around a descriptor data corruption issue
			 * seen with NFS v2 UDP traffic, just disable the NFS
			 * filtering capability.
   3889 			 */
   3890 			reg = CSR_READ(sc, WMREG_RFCTL);
   3891 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3892 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3893 			break;
   3894 		default:
   3895 			break;
   3896 		}
   3897 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3898 
   3899 		switch (sc->sc_type) {
   3900 		/*
   3901 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3902 		 * Avoid RSS Hash Value bug.
   3903 		 */
   3904 		case WM_T_82571:
   3905 		case WM_T_82572:
   3906 		case WM_T_82573:
   3907 		case WM_T_80003:
   3908 		case WM_T_ICH8:
   3909 			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS
			    | WMREG_RFCTL_IPV6EXDIS;
   3911 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3912 			break;
   3913 		case WM_T_82574:
			/* Use extended Rx descriptors. */
   3915 			reg = CSR_READ(sc, WMREG_RFCTL);
   3916 			reg |= WMREG_RFCTL_EXSTEN;
   3917 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3918 			break;
   3919 		default:
   3920 			break;
   3921 		}
   3922 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3923 		/*
   3924 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3925 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3926 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3927 		 * Correctly by the Device"
   3928 		 *
   3929 		 * I354(C2000) Errata AVR53:
   3930 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3931 		 * Hang"
   3932 		 */
   3933 		reg = CSR_READ(sc, WMREG_RFCTL);
   3934 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3935 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3936 	}
   3937 }
   3938 
   3939 static uint32_t
   3940 wm_rxpbs_adjust_82580(uint32_t val)
   3941 {
   3942 	uint32_t rv = 0;
   3943 
   3944 	if (val < __arraycount(wm_82580_rxpbs_table))
   3945 		rv = wm_82580_rxpbs_table[val];
   3946 
   3947 	return rv;
   3948 }
   3949 
   3950 /*
   3951  * wm_reset_phy:
   3952  *
   3953  *	generic PHY reset function.
   3954  *	Same as e1000_phy_hw_reset_generic()
   3955  */
   3956 static void
   3957 wm_reset_phy(struct wm_softc *sc)
   3958 {
   3959 	uint32_t reg;
   3960 
   3961 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3962 		device_xname(sc->sc_dev), __func__));
   3963 	if (wm_phy_resetisblocked(sc))
   3964 		return;
   3965 
   3966 	sc->phy.acquire(sc);
   3967 
   3968 	reg = CSR_READ(sc, WMREG_CTRL);
   3969 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3970 	CSR_WRITE_FLUSH(sc);
   3971 
   3972 	delay(sc->phy.reset_delay_us);
   3973 
   3974 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3975 	CSR_WRITE_FLUSH(sc);
   3976 
   3977 	delay(150);
   3978 
   3979 	sc->phy.release(sc);
   3980 
   3981 	wm_get_cfg_done(sc);
   3982 	wm_phy_post_reset(sc);
   3983 }
   3984 
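/*
 * wm_flush_desc_rings:
 *
 *	Flush the TX and RX descriptor rings if the hardware reports a
 *	pending flush request in the descriptor ring status register.
 *	The TX ring is drained by queueing one dummy 512-byte descriptor
 *	with the transmitter enabled; the RX ring is drained by briefly
 *	toggling RCTL_EN with descriptor-granularity thresholds.
 */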
   3985 static void
   3986 wm_flush_desc_rings(struct wm_softc *sc)
   3987 {
   3988 	pcireg_t preg;
   3989 	uint32_t reg;
   3990 	int nexttx;
   3991 
   3992 	/* First, disable MULR fix in FEXTNVM11 */
   3993 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3994 	reg |= FEXTNVM11_DIS_MULRFIX;
   3995 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3996 
   3997 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3998 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3999 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   4000 		struct wm_txqueue *txq;
   4001 		wiseman_txdesc_t *txd;
   4002 
   4003 		/* TX */
   4004 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4005 		    device_xname(sc->sc_dev), preg, reg);
   4006 		reg = CSR_READ(sc, WMREG_TCTL);
   4007 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4008 
   4009 		txq = &sc->sc_queue[0].wmq_txq;
   4010 		nexttx = txq->txq_next;
   4011 		txd = &txq->txq_descs[nexttx];
   4012 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4014 		txd->wtx_fields.wtxu_status = 0;
   4015 		txd->wtx_fields.wtxu_options = 0;
   4016 		txd->wtx_fields.wtxu_vlan = 0;
   4017 
   4018 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4019 			BUS_SPACE_BARRIER_WRITE);
   4020 
   4021 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4022 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4023 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4024 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4025 		delay(250);
   4026 	}
   4027 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4028 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   4029 		uint32_t rctl;
   4030 
   4031 		/* RX */
   4032 		printf("%s: Need RX flush (reg = %08x)\n",
   4033 		    device_xname(sc->sc_dev), preg);
   4034 		rctl = CSR_READ(sc, WMREG_RCTL);
   4035 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4036 		CSR_WRITE_FLUSH(sc);
   4037 		delay(150);
   4038 
   4039 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4040 		/* zero the lower 14 bits (prefetch and host thresholds) */
   4041 		reg &= 0xffffc000;
   4042 		/*
   4043 		 * update thresholds: prefetch threshold to 31, host threshold
   4044 		 * to 1 and make sure the granularity is "descriptors" and not
   4045 		 * "cache lines"
   4046 		 */
   4047 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4048 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4049 
   4050 		/*
   4051 		 * momentarily enable the RX ring for the changes to take
   4052 		 * effect
   4053 		 */
   4054 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4055 		CSR_WRITE_FLUSH(sc);
   4056 		delay(150);
   4057 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4058 	}
   4059 }
   4060 
   4061 /*
   4062  * wm_reset:
   4063  *
   4064  *	Reset the i82542 chip.
   4065  */
   4066 static void
   4067 wm_reset(struct wm_softc *sc)
   4068 {
   4069 	int phy_reset = 0;
   4070 	int i, error = 0;
   4071 	uint32_t reg;
   4072 
   4073 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4074 		device_xname(sc->sc_dev), __func__));
   4075 	KASSERT(sc->sc_type != 0);
   4076 
   4077 	/*
   4078 	 * Allocate on-chip memory according to the MTU size.
   4079 	 * The Packet Buffer Allocation register must be written
   4080 	 * before the chip is reset.
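	 *
	 * For example, on the 82547 with a standard MTU, 30K of the 40K
	 * packet buffer goes to RX and the remaining 10K becomes the TX
	 * FIFO (see the txq_fifo_size computation below).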
   4081 	 */
   4082 	switch (sc->sc_type) {
   4083 	case WM_T_82547:
   4084 	case WM_T_82547_2:
   4085 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4086 		    PBA_22K : PBA_30K;
   4087 		for (i = 0; i < sc->sc_nqueues; i++) {
   4088 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4089 			txq->txq_fifo_head = 0;
   4090 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4091 			txq->txq_fifo_size =
   4092 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4093 			txq->txq_fifo_stall = 0;
   4094 		}
   4095 		break;
   4096 	case WM_T_82571:
   4097 	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4099 	case WM_T_80003:
   4100 		sc->sc_pba = PBA_32K;
   4101 		break;
   4102 	case WM_T_82573:
   4103 		sc->sc_pba = PBA_12K;
   4104 		break;
   4105 	case WM_T_82574:
   4106 	case WM_T_82583:
   4107 		sc->sc_pba = PBA_20K;
   4108 		break;
   4109 	case WM_T_82576:
   4110 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4111 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4112 		break;
   4113 	case WM_T_82580:
   4114 	case WM_T_I350:
   4115 	case WM_T_I354:
   4116 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4117 		break;
   4118 	case WM_T_I210:
   4119 	case WM_T_I211:
   4120 		sc->sc_pba = PBA_34K;
   4121 		break;
   4122 	case WM_T_ICH8:
   4123 		/* Workaround for a bit corruption issue in FIFO memory */
   4124 		sc->sc_pba = PBA_8K;
   4125 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4126 		break;
   4127 	case WM_T_ICH9:
   4128 	case WM_T_ICH10:
   4129 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4130 		    PBA_14K : PBA_10K;
   4131 		break;
   4132 	case WM_T_PCH:
   4133 	case WM_T_PCH2:
   4134 	case WM_T_PCH_LPT:
   4135 	case WM_T_PCH_SPT:
   4136 		sc->sc_pba = PBA_26K;
   4137 		break;
   4138 	default:
   4139 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4140 		    PBA_40K : PBA_48K;
   4141 		break;
   4142 	}
   4143 	/*
	 * Only old or non-multiqueue devices have the PBA register.
   4145 	 * XXX Need special handling for 82575.
   4146 	 */
   4147 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4148 	    || (sc->sc_type == WM_T_82575))
   4149 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4150 
   4151 	/* Prevent the PCI-E bus from sticking */
   4152 	if (sc->sc_flags & WM_F_PCIE) {
   4153 		int timeout = 800;
   4154 
   4155 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4156 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4157 
   4158 		while (timeout--) {
   4159 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4160 			    == 0)
   4161 				break;
   4162 			delay(100);
   4163 		}
   4164 		if (timeout == 0)
   4165 			device_printf(sc->sc_dev,
   4166 			    "failed to disable busmastering\n");
   4167 	}
   4168 
   4169 	/* Set the completion timeout for interface */
   4170 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4171 	    || (sc->sc_type == WM_T_82580)
   4172 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4173 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4174 		wm_set_pcie_completion_timeout(sc);
   4175 
   4176 	/* Clear interrupt */
   4177 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4178 	if (wm_is_using_msix(sc)) {
   4179 		if (sc->sc_type != WM_T_82574) {
   4180 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4181 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4182 		} else {
   4183 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4184 		}
   4185 	}
   4186 
   4187 	/* Stop the transmit and receive processes. */
   4188 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4189 	sc->sc_rctl &= ~RCTL_EN;
   4190 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4191 	CSR_WRITE_FLUSH(sc);
   4192 
   4193 	/* XXX set_tbi_sbp_82543() */
   4194 
   4195 	delay(10*1000);
   4196 
   4197 	/* Must acquire the MDIO ownership before MAC reset */
   4198 	switch (sc->sc_type) {
   4199 	case WM_T_82573:
   4200 	case WM_T_82574:
   4201 	case WM_T_82583:
   4202 		error = wm_get_hw_semaphore_82573(sc);
   4203 		break;
   4204 	default:
   4205 		break;
   4206 	}
   4207 
   4208 	/*
   4209 	 * 82541 Errata 29? & 82547 Errata 28?
   4210 	 * See also the description about PHY_RST bit in CTRL register
   4211 	 * in 8254x_GBe_SDM.pdf.
   4212 	 */
   4213 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4214 		CSR_WRITE(sc, WMREG_CTRL,
   4215 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4216 		CSR_WRITE_FLUSH(sc);
   4217 		delay(5000);
   4218 	}
   4219 
   4220 	switch (sc->sc_type) {
   4221 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4222 	case WM_T_82541:
   4223 	case WM_T_82541_2:
   4224 	case WM_T_82547:
   4225 	case WM_T_82547_2:
   4226 		/*
   4227 		 * On some chipsets, a reset through a memory-mapped write
   4228 		 * cycle can cause the chip to reset before completing the
   4229 		 * write cycle.  This causes major headache that can be
   4230 		 * avoided by issuing the reset via indirect register writes
   4231 		 * through I/O space.
   4232 		 *
   4233 		 * So, if we successfully mapped the I/O BAR at attach time,
   4234 		 * use that.  Otherwise, try our luck with a memory-mapped
   4235 		 * reset.
   4236 		 */
   4237 		if (sc->sc_flags & WM_F_IOH_VALID)
   4238 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4239 		else
   4240 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4241 		break;
   4242 	case WM_T_82545_3:
   4243 	case WM_T_82546_3:
   4244 		/* Use the shadow control register on these chips. */
   4245 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4246 		break;
   4247 	case WM_T_80003:
   4248 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4249 		sc->phy.acquire(sc);
   4250 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4251 		sc->phy.release(sc);
   4252 		break;
   4253 	case WM_T_ICH8:
   4254 	case WM_T_ICH9:
   4255 	case WM_T_ICH10:
   4256 	case WM_T_PCH:
   4257 	case WM_T_PCH2:
   4258 	case WM_T_PCH_LPT:
   4259 	case WM_T_PCH_SPT:
   4260 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4261 		if (wm_phy_resetisblocked(sc) == false) {
   4262 			/*
   4263 			 * Gate automatic PHY configuration by hardware on
   4264 			 * non-managed 82579
   4265 			 */
   4266 			if ((sc->sc_type == WM_T_PCH2)
   4267 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4268 				== 0))
   4269 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4270 
   4271 			reg |= CTRL_PHY_RESET;
   4272 			phy_reset = 1;
   4273 		} else
   4274 			printf("XXX reset is blocked!!!\n");
   4275 		sc->phy.acquire(sc);
   4276 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during the reset */
   4278 		delay(20*1000);
   4279 		mutex_exit(sc->sc_ich_phymtx);
   4280 		break;
   4281 	case WM_T_82580:
   4282 	case WM_T_I350:
   4283 	case WM_T_I354:
   4284 	case WM_T_I210:
   4285 	case WM_T_I211:
   4286 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4287 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4288 			CSR_WRITE_FLUSH(sc);
   4289 		delay(5000);
   4290 		break;
   4291 	case WM_T_82542_2_0:
   4292 	case WM_T_82542_2_1:
   4293 	case WM_T_82543:
   4294 	case WM_T_82540:
   4295 	case WM_T_82545:
   4296 	case WM_T_82546:
   4297 	case WM_T_82571:
   4298 	case WM_T_82572:
   4299 	case WM_T_82573:
   4300 	case WM_T_82574:
   4301 	case WM_T_82575:
   4302 	case WM_T_82576:
   4303 	case WM_T_82583:
   4304 	default:
   4305 		/* Everything else can safely use the documented method. */
   4306 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4307 		break;
   4308 	}
   4309 
   4310 	/* Must release the MDIO ownership after MAC reset */
   4311 	switch (sc->sc_type) {
   4312 	case WM_T_82573:
   4313 	case WM_T_82574:
   4314 	case WM_T_82583:
   4315 		if (error == 0)
   4316 			wm_put_hw_semaphore_82573(sc);
   4317 		break;
   4318 	default:
   4319 		break;
   4320 	}
   4321 
   4322 	if (phy_reset != 0)
   4323 		wm_get_cfg_done(sc);
   4324 
   4325 	/* reload EEPROM */
   4326 	switch (sc->sc_type) {
   4327 	case WM_T_82542_2_0:
   4328 	case WM_T_82542_2_1:
   4329 	case WM_T_82543:
   4330 	case WM_T_82544:
   4331 		delay(10);
   4332 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4333 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4334 		CSR_WRITE_FLUSH(sc);
   4335 		delay(2000);
   4336 		break;
   4337 	case WM_T_82540:
   4338 	case WM_T_82545:
   4339 	case WM_T_82545_3:
   4340 	case WM_T_82546:
   4341 	case WM_T_82546_3:
   4342 		delay(5*1000);
   4343 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4344 		break;
   4345 	case WM_T_82541:
   4346 	case WM_T_82541_2:
   4347 	case WM_T_82547:
   4348 	case WM_T_82547_2:
   4349 		delay(20000);
   4350 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4351 		break;
   4352 	case WM_T_82571:
   4353 	case WM_T_82572:
   4354 	case WM_T_82573:
   4355 	case WM_T_82574:
   4356 	case WM_T_82583:
   4357 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4358 			delay(10);
   4359 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4360 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4361 			CSR_WRITE_FLUSH(sc);
   4362 		}
   4363 		/* check EECD_EE_AUTORD */
   4364 		wm_get_auto_rd_done(sc);
   4365 		/*
   4366 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4367 		 * is set.
   4368 		 */
   4369 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4370 		    || (sc->sc_type == WM_T_82583))
   4371 			delay(25*1000);
   4372 		break;
   4373 	case WM_T_82575:
   4374 	case WM_T_82576:
   4375 	case WM_T_82580:
   4376 	case WM_T_I350:
   4377 	case WM_T_I354:
   4378 	case WM_T_I210:
   4379 	case WM_T_I211:
   4380 	case WM_T_80003:
   4381 		/* check EECD_EE_AUTORD */
   4382 		wm_get_auto_rd_done(sc);
   4383 		break;
   4384 	case WM_T_ICH8:
   4385 	case WM_T_ICH9:
   4386 	case WM_T_ICH10:
   4387 	case WM_T_PCH:
   4388 	case WM_T_PCH2:
   4389 	case WM_T_PCH_LPT:
   4390 	case WM_T_PCH_SPT:
   4391 		break;
   4392 	default:
   4393 		panic("%s: unknown type\n", __func__);
   4394 	}
   4395 
   4396 	/* Check whether EEPROM is present or not */
   4397 	switch (sc->sc_type) {
   4398 	case WM_T_82575:
   4399 	case WM_T_82576:
   4400 	case WM_T_82580:
   4401 	case WM_T_I350:
   4402 	case WM_T_I354:
   4403 	case WM_T_ICH8:
   4404 	case WM_T_ICH9:
   4405 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4406 			/* Not found */
   4407 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4408 			if (sc->sc_type == WM_T_82575)
   4409 				wm_reset_init_script_82575(sc);
   4410 		}
   4411 		break;
   4412 	default:
   4413 		break;
   4414 	}
   4415 
   4416 	if (phy_reset != 0)
   4417 		wm_phy_post_reset(sc);
   4418 
   4419 	if ((sc->sc_type == WM_T_82580)
   4420 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4421 		/* clear global device reset status bit */
   4422 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4423 	}
   4424 
   4425 	/* Clear any pending interrupt events. */
   4426 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4427 	reg = CSR_READ(sc, WMREG_ICR);
   4428 	if (wm_is_using_msix(sc)) {
   4429 		if (sc->sc_type != WM_T_82574) {
   4430 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4431 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4432 		} else
   4433 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4434 	}
   4435 
   4436 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4437 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4438 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4439 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4440 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4441 		reg |= KABGTXD_BGSQLBIAS;
   4442 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4443 	}
   4444 
   4445 	/* reload sc_ctrl */
   4446 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4447 
   4448 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4449 		wm_set_eee_i350(sc);
   4450 
   4451 	/*
   4452 	 * For PCH, this write will make sure that any noise will be detected
   4453 	 * as a CRC error and be dropped rather than show up as a bad packet
   4454 	 * to the DMA engine
   4455 	 */
   4456 	if (sc->sc_type == WM_T_PCH)
   4457 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4458 
   4459 	if (sc->sc_type >= WM_T_82544)
   4460 		CSR_WRITE(sc, WMREG_WUC, 0);
   4461 
   4462 	wm_reset_mdicnfg_82580(sc);
   4463 
   4464 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4465 		wm_pll_workaround_i210(sc);
   4466 }
   4467 
   4468 /*
   4469  * wm_add_rxbuf:
   4470  *
   4471  *	Add a receive buffer to the indiciated descriptor.
   4472  */
   4473 static int
   4474 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4475 {
   4476 	struct wm_softc *sc = rxq->rxq_sc;
   4477 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4478 	struct mbuf *m;
   4479 	int error;
   4480 
   4481 	KASSERT(mutex_owned(rxq->rxq_lock));
   4482 
   4483 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4484 	if (m == NULL)
   4485 		return ENOBUFS;
   4486 
   4487 	MCLGET(m, M_DONTWAIT);
   4488 	if ((m->m_flags & M_EXT) == 0) {
   4489 		m_freem(m);
   4490 		return ENOBUFS;
   4491 	}
   4492 
   4493 	if (rxs->rxs_mbuf != NULL)
   4494 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4495 
   4496 	rxs->rxs_mbuf = m;
   4497 
   4498 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4499 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4500 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4501 	if (error) {
   4502 		/* XXX XXX XXX */
   4503 		aprint_error_dev(sc->sc_dev,
   4504 		    "unable to load rx DMA map %d, error = %d\n",
   4505 		    idx, error);
   4506 		panic("wm_add_rxbuf");
   4507 	}
   4508 
   4509 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4510 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4511 
   4512 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4513 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4514 			wm_init_rxdesc(rxq, idx);
   4515 	} else
   4516 		wm_init_rxdesc(rxq, idx);
   4517 
   4518 	return 0;
   4519 }
   4520 
   4521 /*
   4522  * wm_rxdrain:
   4523  *
   4524  *	Drain the receive queue.
   4525  */
   4526 static void
   4527 wm_rxdrain(struct wm_rxqueue *rxq)
   4528 {
   4529 	struct wm_softc *sc = rxq->rxq_sc;
   4530 	struct wm_rxsoft *rxs;
   4531 	int i;
   4532 
   4533 	KASSERT(mutex_owned(rxq->rxq_lock));
   4534 
   4535 	for (i = 0; i < WM_NRXDESC; i++) {
   4536 		rxs = &rxq->rxq_soft[i];
   4537 		if (rxs->rxs_mbuf != NULL) {
   4538 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4539 			m_freem(rxs->rxs_mbuf);
   4540 			rxs->rxs_mbuf = NULL;
   4541 		}
   4542 	}
   4543 }
   4544 
   4545 
   4546 /*
 * XXX copied from FreeBSD's sys/net/rss_config.c
   4548  */
   4549 /*
   4550  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4551  * effectiveness may be limited by algorithm choice and available entropy
   4552  * during the boot.
   4553  *
   4554  * XXXRW: And that we don't randomize it yet!
   4555  *
   4556  * This is the default Microsoft RSS specification key which is also
   4557  * the Chelsio T5 firmware default key.
   4558  */
   4559 #define RSS_KEYSIZE 40
   4560 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4561 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4562 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4563 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4564 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4565 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4566 };
   4567 
   4568 /*
 * Caller must pass an array of size sizeof(wm_rss_key).
 *
 * XXX
 * As if_ixgbe may use this function, it should not be an
 * if_wm-specific function.
   4574  */
   4575 static void
   4576 wm_rss_getkey(uint8_t *key)
   4577 {
   4578 
   4579 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4580 }
   4581 
   4582 /*
   4583  * Set up registers for RSS.
   4584  *
   4585  * XXX VMDq is not supported yet.
   4586  */
   4587 static void
   4588 wm_init_rss(struct wm_softc *sc)
   4589 {
   4590 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4591 	int i;
   4592 
   4593 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4594 
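        	/*
        	 * Fill the redirection table (RETA).  Each entry selects the
        	 * Rx queue for one hash-result bucket; the i % sc_nqueues
        	 * mapping below spreads buckets round-robin, e.g. with
        	 * sc_nqueues == 4 the entries become queue 0, 1, 2, 3, 0, ...
        	 */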
   4595 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4596 		int qid, reta_ent;
   4597 
   4598 		qid  = i % sc->sc_nqueues;
   4599 		switch (sc->sc_type) {
   4600 		case WM_T_82574:
   4601 			reta_ent = __SHIFTIN(qid,
   4602 			    RETA_ENT_QINDEX_MASK_82574);
   4603 			break;
   4604 		case WM_T_82575:
   4605 			reta_ent = __SHIFTIN(qid,
   4606 			    RETA_ENT_QINDEX1_MASK_82575);
   4607 			break;
   4608 		default:
   4609 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4610 			break;
   4611 		}
   4612 
   4613 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4614 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4615 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4616 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4617 	}
   4618 
   4619 	wm_rss_getkey((uint8_t *)rss_key);
   4620 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4621 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4622 
   4623 	if (sc->sc_type == WM_T_82574)
   4624 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4625 	else
   4626 		mrqc = MRQC_ENABLE_RSS_MQ;
   4627 
   4628 	/*
   4629 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4630 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4631 	 */
   4632 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4633 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4634 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4635 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4636 
   4637 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4638 }
   4639 
   4640 /*
   4641  * Adjust the TX and RX queue numbers which the system actually uses.
   4642  *
   4643  * The numbers are affected by the following parameters:
   4644  *     - The number of hardware queues
   4645  *     - The number of MSI-X vectors (= "nvectors" argument)
   4646  *     - ncpu
   4647  */
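        /*
         * Example: an 82576 (16 hardware queues) attached with
         * nvectors == 5 on an 8-CPU system gets
         * sc_nqueues == min(16, 5 - 1, 8) == 4.
         */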
   4648 static void
   4649 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4650 {
   4651 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4652 
   4653 	if (nvectors < 2) {
   4654 		sc->sc_nqueues = 1;
   4655 		return;
   4656 	}
   4657 
   4658 	switch (sc->sc_type) {
   4659 	case WM_T_82572:
   4660 		hw_ntxqueues = 2;
   4661 		hw_nrxqueues = 2;
   4662 		break;
   4663 	case WM_T_82574:
   4664 		hw_ntxqueues = 2;
   4665 		hw_nrxqueues = 2;
   4666 		break;
   4667 	case WM_T_82575:
   4668 		hw_ntxqueues = 4;
   4669 		hw_nrxqueues = 4;
   4670 		break;
   4671 	case WM_T_82576:
   4672 		hw_ntxqueues = 16;
   4673 		hw_nrxqueues = 16;
   4674 		break;
   4675 	case WM_T_82580:
   4676 	case WM_T_I350:
   4677 	case WM_T_I354:
   4678 		hw_ntxqueues = 8;
   4679 		hw_nrxqueues = 8;
   4680 		break;
   4681 	case WM_T_I210:
   4682 		hw_ntxqueues = 4;
   4683 		hw_nrxqueues = 4;
   4684 		break;
   4685 	case WM_T_I211:
   4686 		hw_ntxqueues = 2;
   4687 		hw_nrxqueues = 2;
   4688 		break;
   4689 		/*
   4690 		 * As the Ethernet controllers below do not support MSI-X,
   4691 		 * this driver does not use multiqueue on them:
   4692 		 *     - WM_T_80003
   4693 		 *     - WM_T_ICH8
   4694 		 *     - WM_T_ICH9
   4695 		 *     - WM_T_ICH10
   4696 		 *     - WM_T_PCH
   4697 		 *     - WM_T_PCH2
   4698 		 *     - WM_T_PCH_LPT
   4699 		 */
   4700 	default:
   4701 		hw_ntxqueues = 1;
   4702 		hw_nrxqueues = 1;
   4703 		break;
   4704 	}
   4705 
   4706 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4707 
   4708 	/*
   4709 	 * As more queues than MSI-X vectors cannot improve scaling, limit
   4710 	 * the number of queues actually used.
   4711 	 */
   4712 	if (nvectors < hw_nqueues + 1) {
   4713 		sc->sc_nqueues = nvectors - 1;
   4714 	} else {
   4715 		sc->sc_nqueues = hw_nqueues;
   4716 	}
   4717 
   4718 	/*
   4719 	 * As more queues than CPUs cannot improve scaling, limit
   4720 	 * the number of queues actually used.
   4721 	 */
   4722 	if (ncpu < sc->sc_nqueues)
   4723 		sc->sc_nqueues = ncpu;
   4724 }
   4725 
   4726 static inline bool
   4727 wm_is_using_msix(struct wm_softc *sc)
   4728 {
   4729 
   4730 	return (sc->sc_nintrs > 1);
   4731 }
   4732 
   4733 static inline bool
   4734 wm_is_using_multiqueue(struct wm_softc *sc)
   4735 {
   4736 
   4737 	return (sc->sc_nqueues > 1);
   4738 }
   4739 
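        /*
         * Establish the per-queue software interrupt that runs
         * wm_handle_queue(), deferring Tx/Rx processing out of the
         * hardware interrupt handler.
         */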
   4740 static int
   4741 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4742 {
   4743 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4744 	wmq->wmq_id = qidx;
   4745 	wmq->wmq_intr_idx = intr_idx;
   4746 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4747 #ifdef WM_MPSAFE
   4748 	    | SOFTINT_MPSAFE
   4749 #endif
   4750 	    , wm_handle_queue, wmq);
   4751 	if (wmq->wmq_si != NULL)
   4752 		return 0;
   4753 
   4754 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4755 	    wmq->wmq_id);
   4756 
   4757 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4758 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4759 	return ENOMEM;
   4760 }
   4761 
   4762 /*
   4763  * Both single-interrupt MSI and INTx can use this function.
   4764  */
   4765 static int
   4766 wm_setup_legacy(struct wm_softc *sc)
   4767 {
   4768 	pci_chipset_tag_t pc = sc->sc_pc;
   4769 	const char *intrstr = NULL;
   4770 	char intrbuf[PCI_INTRSTR_LEN];
   4771 	int error;
   4772 
   4773 	error = wm_alloc_txrx_queues(sc);
   4774 	if (error) {
   4775 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4776 		    error);
   4777 		return ENOMEM;
   4778 	}
   4779 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4780 	    sizeof(intrbuf));
   4781 #ifdef WM_MPSAFE
   4782 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4783 #endif
   4784 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4785 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4786 	if (sc->sc_ihs[0] == NULL) {
   4787 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4788 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4789 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4790 		return ENOMEM;
   4791 	}
   4792 
   4793 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4794 	sc->sc_nintrs = 1;
   4795 
   4796 	return wm_softint_establish(sc, 0, 0);
   4797 }
   4798 
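        /*
         * Set up MSI-X interrupts: vectors [0, sc_nqueues) are the
         * per-queue TX/RX interrupts and vector sc_nqueues is the LINK
         * interrupt, so sc_nintrs ends up as sc_nqueues + 1.
         */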
   4799 static int
   4800 wm_setup_msix(struct wm_softc *sc)
   4801 {
   4802 	void *vih;
   4803 	kcpuset_t *affinity;
   4804 	int qidx, error, intr_idx, txrx_established;
   4805 	pci_chipset_tag_t pc = sc->sc_pc;
   4806 	const char *intrstr = NULL;
   4807 	char intrbuf[PCI_INTRSTR_LEN];
   4808 	char intr_xname[INTRDEVNAMEBUF];
   4809 
   4810 	if (sc->sc_nqueues < ncpu) {
   4811 		/*
   4812 		 * To avoid colliding with other devices' interrupts,
   4813 		 * the affinity of Tx/Rx interrupts starts from CPU#1.
   4814 		 */
   4815 		sc->sc_affinity_offset = 1;
   4816 	} else {
   4817 		/*
   4818 		 * In this case, this device uses all CPUs, so we unify the
   4819 		 * affinitized cpu_index with the MSI-X vector number for readability.
   4820 		 */
   4821 		sc->sc_affinity_offset = 0;
   4822 	}
   4823 
   4824 	error = wm_alloc_txrx_queues(sc);
   4825 	if (error) {
   4826 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4827 		    error);
   4828 		return ENOMEM;
   4829 	}
   4830 
   4831 	kcpuset_create(&affinity, false);
   4832 	intr_idx = 0;
   4833 
   4834 	/*
   4835 	 * TX and RX
   4836 	 */
   4837 	txrx_established = 0;
   4838 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4839 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4840 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4841 
   4842 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4843 		    sizeof(intrbuf));
   4844 #ifdef WM_MPSAFE
   4845 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4846 		    PCI_INTR_MPSAFE, true);
   4847 #endif
   4848 		memset(intr_xname, 0, sizeof(intr_xname));
   4849 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4850 		    device_xname(sc->sc_dev), qidx);
   4851 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4852 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4853 		if (vih == NULL) {
   4854 			aprint_error_dev(sc->sc_dev,
   4855 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4856 			    intrstr ? " at " : "",
   4857 			    intrstr ? intrstr : "");
   4858 
   4859 			goto fail;
   4860 		}
   4861 		kcpuset_zero(affinity);
   4862 		/* Round-robin affinity */
   4863 		kcpuset_set(affinity, affinity_to);
   4864 		error = interrupt_distribute(vih, affinity, NULL);
   4865 		if (error == 0) {
   4866 			aprint_normal_dev(sc->sc_dev,
   4867 			    "for TX and RX interrupting at %s affinity to %u\n",
   4868 			    intrstr, affinity_to);
   4869 		} else {
   4870 			aprint_normal_dev(sc->sc_dev,
   4871 			    "for TX and RX interrupting at %s\n", intrstr);
   4872 		}
   4873 		sc->sc_ihs[intr_idx] = vih;
   4874 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4875 			goto fail;
   4876 		txrx_established++;
   4877 		intr_idx++;
   4878 	}
   4879 
   4880 	/*
   4881 	 * LINK
   4882 	 */
   4883 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4884 	    sizeof(intrbuf));
   4885 #ifdef WM_MPSAFE
   4886 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4887 #endif
   4888 	memset(intr_xname, 0, sizeof(intr_xname));
   4889 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4890 	    device_xname(sc->sc_dev));
   4891 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4892 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4893 	if (vih == NULL) {
   4894 		aprint_error_dev(sc->sc_dev,
   4895 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4896 		    intrstr ? " at " : "",
   4897 		    intrstr ? intrstr : "");
   4898 
   4899 		goto fail;
   4900 	}
   4901 	/* Keep the default affinity for the LINK interrupt */
   4902 	aprint_normal_dev(sc->sc_dev,
   4903 	    "for LINK interrupting at %s\n", intrstr);
   4904 	sc->sc_ihs[intr_idx] = vih;
   4905 	sc->sc_link_intr_idx = intr_idx;
   4906 
   4907 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4908 	kcpuset_destroy(affinity);
   4909 	return 0;
   4910 
   4911  fail:
   4912 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4913 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4914 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4915 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4916 	}
   4917 
   4918 	kcpuset_destroy(affinity);
   4919 	return ENOMEM;
   4920 }
   4921 
   4922 static void
   4923 wm_turnon(struct wm_softc *sc)
   4924 {
   4925 	int i;
   4926 
   4927 	KASSERT(WM_CORE_LOCKED(sc));
   4928 
   4929 	/*
   4930 	 * The stopping flags must be unset in ascending order.
   4931 	 */
   4932 	for (i = 0; i < sc->sc_nqueues; i++) {
   4933 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4934 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4935 
   4936 		mutex_enter(txq->txq_lock);
   4937 		txq->txq_stopping = false;
   4938 		mutex_exit(txq->txq_lock);
   4939 
   4940 		mutex_enter(rxq->rxq_lock);
   4941 		rxq->rxq_stopping = false;
   4942 		mutex_exit(rxq->rxq_lock);
   4943 	}
   4944 
   4945 	sc->sc_core_stopping = false;
   4946 }
   4947 
   4948 static void
   4949 wm_turnoff(struct wm_softc *sc)
   4950 {
   4951 	int i;
   4952 
   4953 	KASSERT(WM_CORE_LOCKED(sc));
   4954 
   4955 	sc->sc_core_stopping = true;
   4956 
   4957 	/*
   4958 	 * The stopping flags must be set in ascending order.
   4959 	 */
   4960 	for (i = 0; i < sc->sc_nqueues; i++) {
   4961 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4962 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4963 
   4964 		mutex_enter(rxq->rxq_lock);
   4965 		rxq->rxq_stopping = true;
   4966 		mutex_exit(rxq->rxq_lock);
   4967 
   4968 		mutex_enter(txq->txq_lock);
   4969 		txq->txq_stopping = true;
   4970 		mutex_exit(txq->txq_lock);
   4971 	}
   4972 }
   4973 
   4974 /*
   4975  * Write the interrupt interval value to the ITR or EITR register.
   4976  */
   4977 static void
   4978 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4979 {
   4980 
   4981 	if (!wmq->wmq_set_itr)
   4982 		return;
   4983 
   4984 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4985 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4986 
   4987 		/*
   4988 		 * The 82575 doesn't have the CNT_INGR field,
   4989 		 * so overwrite the counter field in software.
   4990 		 */
   4991 		if (sc->sc_type == WM_T_82575)
   4992 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4993 		else
   4994 			eitr |= EITR_CNT_INGR;
   4995 
   4996 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4997 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   4998 		/*
   4999 		 * The 82574 has both ITR and EITR.  Set EITR when we use
   5000 		 * the multiqueue function with MSI-X.
   5001 		 */
   5002 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5003 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5004 	} else {
   5005 		KASSERT(wmq->wmq_id == 0);
   5006 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5007 	}
   5008 
   5009 	wmq->wmq_set_itr = false;
   5010 }
   5011 
   5012 /*
   5013  * TODO
   5014  * The dynamic ITR calculation below is almost the same as Linux igb's;
   5015  * however, it does not fit wm(4), so AIM stays disabled until we find
   5016  * an appropriate ITR calculation.
   5017  */
   5018 /*
   5019  * Calculate the interrupt interval value to be written to the register
   5020  * by wm_itrs_writereg().  This function does not write ITR/EITR itself.
   5021  */
   5022 static void
   5023 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5024 {
   5025 #ifdef NOTYET
   5026 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5027 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5028 	uint32_t avg_size = 0;
   5029 	uint32_t new_itr;
   5030 
   5031 	if (rxq->rxq_packets)
   5032 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5033 	if (txq->txq_packets)
   5034 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5035 
   5036 	if (avg_size == 0) {
   5037 		new_itr = 450; /* restore default value */
   5038 		goto out;
   5039 	}
   5040 
   5041 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5042 	avg_size += 24;
   5043 
   5044 	/* Don't starve jumbo frames */
   5045 	avg_size = min(avg_size, 3000);
   5046 
   5047 	/* Give a little boost to mid-size frames */
   5048 	if ((avg_size > 300) && (avg_size < 1200))
   5049 		new_itr = avg_size / 3;
   5050 	else
   5051 		new_itr = avg_size / 2;
   5052 
   5053 out:
   5054 	/*
   5055 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5056 	 * controllers.  See the sc->sc_itr_init setting in wm_init_locked().
   5057 	 */
   5058 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5059 		new_itr *= 4;
   5060 
   5061 	if (new_itr != wmq->wmq_itr) {
   5062 		wmq->wmq_itr = new_itr;
   5063 		wmq->wmq_set_itr = true;
   5064 	} else
   5065 		wmq->wmq_set_itr = false;
   5066 
   5067 	rxq->rxq_packets = 0;
   5068 	rxq->rxq_bytes = 0;
   5069 	txq->txq_packets = 0;
   5070 	txq->txq_bytes = 0;
   5071 #endif
   5072 }
   5073 
   5074 /*
   5075  * wm_init:		[ifnet interface function]
   5076  *
   5077  *	Initialize the interface.
   5078  */
   5079 static int
   5080 wm_init(struct ifnet *ifp)
   5081 {
   5082 	struct wm_softc *sc = ifp->if_softc;
   5083 	int ret;
   5084 
   5085 	WM_CORE_LOCK(sc);
   5086 	ret = wm_init_locked(ifp);
   5087 	WM_CORE_UNLOCK(sc);
   5088 
   5089 	return ret;
   5090 }
   5091 
   5092 static int
   5093 wm_init_locked(struct ifnet *ifp)
   5094 {
   5095 	struct wm_softc *sc = ifp->if_softc;
   5096 	int i, j, trynum, error = 0;
   5097 	uint32_t reg;
   5098 
   5099 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5100 		device_xname(sc->sc_dev), __func__));
   5101 	KASSERT(WM_CORE_LOCKED(sc));
   5102 
   5103 	/*
   5104 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
   5105 	 * There is a small but measurable benefit to avoiding the adjustment
   5106 	 * of the descriptor so that the headers are aligned, for normal MTU,
   5107 	 * on such platforms.  One possibility is that the DMA itself is
   5108 	 * slightly more efficient if the front of the entire packet (instead
   5109 	 * of the front of the headers) is aligned.
   5110 	 *
   5111 	 * Note we must always set align_tweak to 0 if we are using
   5112 	 * jumbo frames.
   5113 	 */
   5114 #ifdef __NO_STRICT_ALIGNMENT
   5115 	sc->sc_align_tweak = 0;
   5116 #else
   5117 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5118 		sc->sc_align_tweak = 0;
   5119 	else
   5120 		sc->sc_align_tweak = 2;
   5121 #endif /* __NO_STRICT_ALIGNMENT */
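        	/*
        	 * The 2-byte tweak shifts the 14-byte Ethernet header so that
        	 * the IP header that follows lands on a 4-byte boundary.
        	 */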
   5122 
   5123 	/* Cancel any pending I/O. */
   5124 	wm_stop_locked(ifp, 0);
   5125 
   5126 	/* update statistics before reset */
   5127 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5128 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5129 
   5130 	/* PCH_SPT hardware workaround */
   5131 	if (sc->sc_type == WM_T_PCH_SPT)
   5132 		wm_flush_desc_rings(sc);
   5133 
   5134 	/* Reset the chip to a known state. */
   5135 	wm_reset(sc);
   5136 
   5137 	/*
   5138 	 * AMT-based hardware can now take control from firmware.
   5139 	 * Do this after reset.
   5140 	 */
   5141 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5142 		wm_get_hw_control(sc);
   5143 
   5144 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5145 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5146 		wm_legacy_irq_quirk_spt(sc);
   5147 
   5148 	/* Init hardware bits */
   5149 	wm_initialize_hardware_bits(sc);
   5150 
   5151 	/* Reset the PHY. */
   5152 	if (sc->sc_flags & WM_F_HAS_MII)
   5153 		wm_gmii_reset(sc);
   5154 
   5155 	/* Calculate (E)ITR value */
   5156 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5157 		/*
   5158 		 * For NEWQUEUE's EITR (except for the 82575).
   5159 		 * The 82575's EITR should be set to the same throttling value
   5160 		 * as other old controllers' ITR because the interrupts/sec
   5161 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
   5162 		 *
   5163 		 * The 82574's EITR should be set to the same value as its ITR.
   5164 		 *
   5165 		 * For N interrupts/sec, set this value to 1,000,000 / N,
   5166 		 * in contrast to the ITR throttling value.
   5167 		 */
   5168 		sc->sc_itr_init = 450;
   5169 	} else if (sc->sc_type >= WM_T_82543) {
   5170 		/*
   5171 		 * Set up the interrupt throttling register (units of 256ns)
   5172 		 * Note that a footnote in Intel's documentation says this
   5173 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5174 		 * or 10Mbit mode.  Empirically, this appears to be the case
   5175 		 * for the 1024ns units of the other
   5176 		 * interrupt-related timer registers -- so, really, we ought
   5177 		 * to divide this value by 4 when the link speed is low.
   5178 		 *
   5179 		 * XXX implement this division at link speed change!
   5180 		 */
   5181 
   5182 		/*
   5183 		 * For N interrupts/sec, set this value to:
   5184 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5185 		 * absolute and packet timer values to this value
   5186 		 * divided by 4 to get "simple timer" behavior.
   5187 		 */
   5188 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5189 	}
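        	/*
        	 * Example: with the formulas above, sc_itr_init == 450 in
        	 * EITR units yields about 1,000,000 / 450 ~= 2222 ints/sec,
        	 * and sc_itr_init == 1500 in ITR units yields
        	 * 1,000,000,000 / (1500 * 256) ~= 2604 ints/sec.
        	 */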
   5190 
   5191 	error = wm_init_txrx_queues(sc);
   5192 	if (error)
   5193 		goto out;
   5194 
   5195 	/*
   5196 	 * Clear out the VLAN table -- we don't use it (yet).
   5197 	 */
   5198 	CSR_WRITE(sc, WMREG_VET, 0);
   5199 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5200 		trynum = 10; /* Due to hw errata */
   5201 	else
   5202 		trynum = 1;
   5203 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5204 		for (j = 0; j < trynum; j++)
   5205 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5206 
   5207 	/*
   5208 	 * Set up flow-control parameters.
   5209 	 *
   5210 	 * XXX Values could probably stand some tuning.
   5211 	 */
   5212 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5213 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5214 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5215 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5216 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5217 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5218 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5219 	}
   5220 
   5221 	sc->sc_fcrtl = FCRTL_DFLT;
   5222 	if (sc->sc_type < WM_T_82543) {
   5223 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5224 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5225 	} else {
   5226 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5227 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5228 	}
   5229 
   5230 	if (sc->sc_type == WM_T_80003)
   5231 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5232 	else
   5233 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5234 
   5235 	/* Writes the control register. */
   5236 	wm_set_vlan(sc);
   5237 
   5238 	if (sc->sc_flags & WM_F_HAS_MII) {
   5239 		int val;
   5240 
   5241 		switch (sc->sc_type) {
   5242 		case WM_T_80003:
   5243 		case WM_T_ICH8:
   5244 		case WM_T_ICH9:
   5245 		case WM_T_ICH10:
   5246 		case WM_T_PCH:
   5247 		case WM_T_PCH2:
   5248 		case WM_T_PCH_LPT:
   5249 		case WM_T_PCH_SPT:
   5250 			/*
   5251 			 * Set the mac to wait the maximum time between each
   5252 			 * iteration and increase the max iterations when
   5253 			 * polling the phy; this fixes erroneous timeouts at
   5254 			 * 10Mbps.
   5255 			 */
   5256 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5257 			    0xFFFF);
   5258 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5259 			val |= 0x3F;
   5260 			wm_kmrn_writereg(sc,
   5261 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5262 			break;
   5263 		default:
   5264 			break;
   5265 		}
   5266 
   5267 		if (sc->sc_type == WM_T_80003) {
   5268 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5269 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5270 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5271 
   5272 			/* Bypass RX and TX FIFO's */
   5273 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5274 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5275 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5276 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5277 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5278 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5279 		}
   5280 	}
   5281 #if 0
   5282 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5283 #endif
   5284 
   5285 	/* Set up checksum offload parameters. */
   5286 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5287 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5288 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5289 		reg |= RXCSUM_IPOFL;
   5290 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5291 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5292 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5293 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5294 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5295 
   5296 	/* Set registers about MSI-X */
   5297 	if (wm_is_using_msix(sc)) {
   5298 		uint32_t ivar;
   5299 		struct wm_queue *wmq;
   5300 		int qid, qintr_idx;
   5301 
   5302 		if (sc->sc_type == WM_T_82575) {
   5303 			/* Interrupt control */
   5304 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5305 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5306 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5307 
   5308 			/* TX and RX */
   5309 			for (i = 0; i < sc->sc_nqueues; i++) {
   5310 				wmq = &sc->sc_queue[i];
   5311 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5312 				    EITR_TX_QUEUE(wmq->wmq_id)
   5313 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5314 			}
   5315 			/* Link status */
   5316 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5317 			    EITR_OTHER);
   5318 		} else if (sc->sc_type == WM_T_82574) {
   5319 			/* Interrupt control */
   5320 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5321 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5322 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5323 
   5324 			/*
   5325 			 * Work around an issue with spurious interrupts
   5326 			 * in MSI-X mode.
   5327 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
   5328 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5329 			 */
   5330 			reg = CSR_READ(sc, WMREG_RFCTL);
   5331 			reg |= WMREG_RFCTL_ACKDIS;
   5332 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5333 
   5334 			ivar = 0;
   5335 			/* TX and RX */
   5336 			for (i = 0; i < sc->sc_nqueues; i++) {
   5337 				wmq = &sc->sc_queue[i];
   5338 				qid = wmq->wmq_id;
   5339 				qintr_idx = wmq->wmq_intr_idx;
   5340 
   5341 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5342 				    IVAR_TX_MASK_Q_82574(qid));
   5343 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5344 				    IVAR_RX_MASK_Q_82574(qid));
   5345 			}
   5346 			/* Link status */
   5347 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5348 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5349 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5350 		} else {
   5351 			/* Interrupt control */
   5352 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5353 			    | GPIE_EIAME | GPIE_PBA);
   5354 
   5355 			switch (sc->sc_type) {
   5356 			case WM_T_82580:
   5357 			case WM_T_I350:
   5358 			case WM_T_I354:
   5359 			case WM_T_I210:
   5360 			case WM_T_I211:
   5361 				/* TX and RX */
   5362 				for (i = 0; i < sc->sc_nqueues; i++) {
   5363 					wmq = &sc->sc_queue[i];
   5364 					qid = wmq->wmq_id;
   5365 					qintr_idx = wmq->wmq_intr_idx;
   5366 
   5367 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5368 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5369 					ivar |= __SHIFTIN((qintr_idx
   5370 						| IVAR_VALID),
   5371 					    IVAR_TX_MASK_Q(qid));
   5372 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5373 					ivar |= __SHIFTIN((qintr_idx
   5374 						| IVAR_VALID),
   5375 					    IVAR_RX_MASK_Q(qid));
   5376 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5377 				}
   5378 				break;
   5379 			case WM_T_82576:
   5380 				/* TX and RX */
   5381 				for (i = 0; i < sc->sc_nqueues; i++) {
   5382 					wmq = &sc->sc_queue[i];
   5383 					qid = wmq->wmq_id;
   5384 					qintr_idx = wmq->wmq_intr_idx;
   5385 
   5386 					ivar = CSR_READ(sc,
   5387 					    WMREG_IVAR_Q_82576(qid));
   5388 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5389 					ivar |= __SHIFTIN((qintr_idx
   5390 						| IVAR_VALID),
   5391 					    IVAR_TX_MASK_Q_82576(qid));
   5392 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5393 					ivar |= __SHIFTIN((qintr_idx
   5394 						| IVAR_VALID),
   5395 					    IVAR_RX_MASK_Q_82576(qid));
   5396 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5397 					    ivar);
   5398 				}
   5399 				break;
   5400 			default:
   5401 				break;
   5402 			}
   5403 
   5404 			/* Link status */
   5405 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5406 			    IVAR_MISC_OTHER);
   5407 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5408 		}
   5409 
   5410 		if (wm_is_using_multiqueue(sc)) {
   5411 			wm_init_rss(sc);
   5412 
   5413 			/*
   5414 			 * NOTE: Receive Full-Packet Checksum Offload
   5415 			 * is mutually exclusive with multiqueue.  However,
   5416 			 * this is not the same as TCP/IP checksum offload,
   5417 			 * which still works.
   5418 			 */
   5419 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5420 			reg |= RXCSUM_PCSD;
   5421 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5422 		}
   5423 	}
   5424 
   5425 	/* Set up the interrupt registers. */
   5426 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5427 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5428 	    ICR_RXO | ICR_RXT0;
   5429 	if (wm_is_using_msix(sc)) {
   5430 		uint32_t mask;
   5431 		struct wm_queue *wmq;
   5432 
   5433 		switch (sc->sc_type) {
   5434 		case WM_T_82574:
   5435 			mask = 0;
   5436 			for (i = 0; i < sc->sc_nqueues; i++) {
   5437 				wmq = &sc->sc_queue[i];
   5438 				mask |= ICR_TXQ(wmq->wmq_id);
   5439 				mask |= ICR_RXQ(wmq->wmq_id);
   5440 			}
   5441 			mask |= ICR_OTHER;
   5442 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5443 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5444 			break;
   5445 		default:
   5446 			if (sc->sc_type == WM_T_82575) {
   5447 				mask = 0;
   5448 				for (i = 0; i < sc->sc_nqueues; i++) {
   5449 					wmq = &sc->sc_queue[i];
   5450 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5451 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5452 				}
   5453 				mask |= EITR_OTHER;
   5454 			} else {
   5455 				mask = 0;
   5456 				for (i = 0; i < sc->sc_nqueues; i++) {
   5457 					wmq = &sc->sc_queue[i];
   5458 					mask |= 1 << wmq->wmq_intr_idx;
   5459 				}
   5460 				mask |= 1 << sc->sc_link_intr_idx;
   5461 			}
   5462 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5463 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5464 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5465 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5466 			break;
   5467 		}
   5468 	} else
   5469 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5470 
   5471 	/* Set up the inter-packet gap. */
   5472 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5473 
   5474 	if (sc->sc_type >= WM_T_82543) {
   5475 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5476 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5477 			wm_itrs_writereg(sc, wmq);
   5478 		}
   5479 		/*
   5480 		 * Link interrupts occur much less often than TX and RX
   5481 		 * interrupts, so we don't tune the
   5482 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
   5483 		 * FreeBSD's if_igb does.
   5484 		 */
   5485 	}
   5486 
   5487 	/* Set the VLAN ethernetype. */
   5488 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5489 
   5490 	/*
   5491 	 * Set up the transmit control register; we start out with
   5492 	 * a collision distance suitable for FDX, but update it when
   5493 	 * we resolve the media type.
   5494 	 */
   5495 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5496 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5497 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5498 	if (sc->sc_type >= WM_T_82571)
   5499 		sc->sc_tctl |= TCTL_MULR;
   5500 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5501 
   5502 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5503 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   5504 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5505 	}
   5506 
   5507 	if (sc->sc_type == WM_T_80003) {
   5508 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5509 		reg &= ~TCTL_EXT_GCEX_MASK;
   5510 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5511 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5512 	}
   5513 
   5514 	/* Set the media. */
   5515 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5516 		goto out;
   5517 
   5518 	/* Configure for OS presence */
   5519 	wm_init_manageability(sc);
   5520 
   5521 	/*
   5522 	 * Set up the receive control register; we actually program
   5523 	 * the register when we set the receive filter.  Use multicast
   5524 	 * address offset type 0.
   5525 	 *
   5526 	 * Only the i82544 has the ability to strip the incoming
   5527 	 * CRC, so we don't enable that feature.
   5528 	 */
   5529 	sc->sc_mchash_type = 0;
   5530 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5531 	    | RCTL_MO(sc->sc_mchash_type);
   5532 
   5533 	/*
   5534 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5535 	 */
   5536 	if (sc->sc_type == WM_T_82574)
   5537 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5538 
   5539 	/*
   5540 	 * The I350 has a bug where it always strips the CRC whether
   5541 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof().
   5542 	 */
   5543 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5544 	    || (sc->sc_type == WM_T_I210))
   5545 		sc->sc_rctl |= RCTL_SECRC;
   5546 
   5547 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5548 	    && (ifp->if_mtu > ETHERMTU)) {
   5549 		sc->sc_rctl |= RCTL_LPE;
   5550 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5551 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5552 	}
   5553 
   5554 	if (MCLBYTES == 2048) {
   5555 		sc->sc_rctl |= RCTL_2k;
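        	/*
        	 * Select the Rx buffer size bits to match MCLBYTES, since
        	 * each Rx DMA map is created with a single MCLBYTES-sized
        	 * segment (see wm_alloc_rx_buffer()).
        	 */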
   5556 	} else {
   5557 		if (sc->sc_type >= WM_T_82543) {
   5558 			switch (MCLBYTES) {
   5559 			case 4096:
   5560 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5561 				break;
   5562 			case 8192:
   5563 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5564 				break;
   5565 			case 16384:
   5566 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5567 				break;
   5568 			default:
   5569 				panic("wm_init: MCLBYTES %d unsupported",
   5570 				    MCLBYTES);
   5571 				break;
   5572 			}
   5573 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5574 	}
   5575 
   5576 	/* Enable ECC */
   5577 	switch (sc->sc_type) {
   5578 	case WM_T_82571:
   5579 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5580 		reg |= PBA_ECC_CORR_EN;
   5581 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5582 		break;
   5583 	case WM_T_PCH_LPT:
   5584 	case WM_T_PCH_SPT:
   5585 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5586 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5587 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5588 
   5589 		sc->sc_ctrl |= CTRL_MEHE;
   5590 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5591 		break;
   5592 	default:
   5593 		break;
   5594 	}
   5595 
   5596 	/* On 82575 and later, set RDT only if RX is enabled */
   5597 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5598 		int qidx;
   5599 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5600 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5601 			for (i = 0; i < WM_NRXDESC; i++) {
   5602 				mutex_enter(rxq->rxq_lock);
   5603 				wm_init_rxdesc(rxq, i);
   5604 				mutex_exit(rxq->rxq_lock);
   5605 
   5606 			}
   5607 		}
   5608 	}
   5609 
   5610 	/* Set the receive filter. */
   5611 	wm_set_filter(sc);
   5612 
   5613 	wm_turnon(sc);
   5614 
   5615 	/* Start the one second link check clock. */
   5616 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5617 
   5618 	/* ...all done! */
   5619 	ifp->if_flags |= IFF_RUNNING;
   5620 	ifp->if_flags &= ~IFF_OACTIVE;
   5621 
   5622  out:
   5623 	sc->sc_if_flags = ifp->if_flags;
   5624 	if (error)
   5625 		log(LOG_ERR, "%s: interface not running\n",
   5626 		    device_xname(sc->sc_dev));
   5627 	return error;
   5628 }
   5629 
   5630 /*
   5631  * wm_stop:		[ifnet interface function]
   5632  *
   5633  *	Stop transmission on the interface.
   5634  */
   5635 static void
   5636 wm_stop(struct ifnet *ifp, int disable)
   5637 {
   5638 	struct wm_softc *sc = ifp->if_softc;
   5639 
   5640 	WM_CORE_LOCK(sc);
   5641 	wm_stop_locked(ifp, disable);
   5642 	WM_CORE_UNLOCK(sc);
   5643 }
   5644 
   5645 static void
   5646 wm_stop_locked(struct ifnet *ifp, int disable)
   5647 {
   5648 	struct wm_softc *sc = ifp->if_softc;
   5649 	struct wm_txsoft *txs;
   5650 	int i, qidx;
   5651 
   5652 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5653 		device_xname(sc->sc_dev), __func__));
   5654 	KASSERT(WM_CORE_LOCKED(sc));
   5655 
   5656 	wm_turnoff(sc);
   5657 
   5658 	/* Stop the one second clock. */
   5659 	callout_stop(&sc->sc_tick_ch);
   5660 
   5661 	/* Stop the 82547 Tx FIFO stall check timer. */
   5662 	if (sc->sc_type == WM_T_82547)
   5663 		callout_stop(&sc->sc_txfifo_ch);
   5664 
   5665 	if (sc->sc_flags & WM_F_HAS_MII) {
   5666 		/* Down the MII. */
   5667 		mii_down(&sc->sc_mii);
   5668 	} else {
   5669 #if 0
   5670 		/* Should we clear PHY's status properly? */
   5671 		wm_reset(sc);
   5672 #endif
   5673 	}
   5674 
   5675 	/* Stop the transmit and receive processes. */
   5676 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5677 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5678 	sc->sc_rctl &= ~RCTL_EN;
   5679 
   5680 	/*
   5681 	 * Clear the interrupt mask to ensure the device cannot assert its
   5682 	 * interrupt line.
   5683 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5684 	 * service any currently pending or shared interrupt.
   5685 	 */
   5686 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5687 	sc->sc_icr = 0;
   5688 	if (wm_is_using_msix(sc)) {
   5689 		if (sc->sc_type != WM_T_82574) {
   5690 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5691 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5692 		} else
   5693 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5694 	}
   5695 
   5696 	/* Release any queued transmit buffers. */
   5697 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5698 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5699 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5700 		mutex_enter(txq->txq_lock);
   5701 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5702 			txs = &txq->txq_soft[i];
   5703 			if (txs->txs_mbuf != NULL) {
   5704 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5705 				m_freem(txs->txs_mbuf);
   5706 				txs->txs_mbuf = NULL;
   5707 			}
   5708 		}
   5709 		mutex_exit(txq->txq_lock);
   5710 	}
   5711 
   5712 	/* Mark the interface as down and cancel the watchdog timer. */
   5713 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5714 	ifp->if_timer = 0;
   5715 
   5716 	if (disable) {
   5717 		for (i = 0; i < sc->sc_nqueues; i++) {
   5718 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5719 			mutex_enter(rxq->rxq_lock);
   5720 			wm_rxdrain(rxq);
   5721 			mutex_exit(rxq->rxq_lock);
   5722 		}
   5723 	}
   5724 
   5725 #if 0 /* notyet */
   5726 	if (sc->sc_type >= WM_T_82544)
   5727 		CSR_WRITE(sc, WMREG_WUC, 0);
   5728 #endif
   5729 }
   5730 
   5731 static void
   5732 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5733 {
   5734 	struct mbuf *m;
   5735 	int i;
   5736 
   5737 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5738 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5739 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5740 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5741 		    m->m_data, m->m_len, m->m_flags);
   5742 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5743 	    i, i == 1 ? "" : "s");
   5744 }
   5745 
   5746 /*
   5747  * wm_82547_txfifo_stall:
   5748  *
   5749  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5750  *	reset the FIFO pointers, and restart packet transmission.
   5751  */
   5752 static void
   5753 wm_82547_txfifo_stall(void *arg)
   5754 {
   5755 	struct wm_softc *sc = arg;
   5756 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5757 
   5758 	mutex_enter(txq->txq_lock);
   5759 
   5760 	if (txq->txq_stopping)
   5761 		goto out;
   5762 
   5763 	if (txq->txq_fifo_stall) {
   5764 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5765 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5766 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5767 			/*
   5768 			 * Packets have drained.  Stop transmitter, reset
   5769 			 * FIFO pointers, restart transmitter, and kick
   5770 			 * the packet queue.
   5771 			 */
   5772 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5773 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5774 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5775 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5776 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5777 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5778 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5779 			CSR_WRITE_FLUSH(sc);
   5780 
   5781 			txq->txq_fifo_head = 0;
   5782 			txq->txq_fifo_stall = 0;
   5783 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5784 		} else {
   5785 			/*
   5786 			 * Still waiting for packets to drain; try again in
   5787 			 * another tick.
   5788 			 */
   5789 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5790 		}
   5791 	}
   5792 
   5793 out:
   5794 	mutex_exit(txq->txq_lock);
   5795 }
   5796 
   5797 /*
   5798  * wm_82547_txfifo_bugchk:
   5799  *
   5800  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5801  *	Check for a bug condition in the 82547 Tx FIFO.  We need to
   5802  *	prevent enqueueing a packet that would wrap around the end
   5803  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5804  *	We do this by checking the amount of space before the end
   5805  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5806  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5807  *	the internal FIFO pointers to the beginning, and restart
   5808  *	transmission on the interface.
   5809  */
   5810 #define	WM_FIFO_HDR		0x10
   5811 #define	WM_82547_PAD_LEN	0x3e0
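        /*
         * Example: a 1514-byte frame consumes
         * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) == 1536 bytes of FIFO
         * space, including the 16-byte FIFO header.
         */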
   5812 static int
   5813 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5814 {
   5815 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5816 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5817 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5818 
   5819 	/* Just return if already stalled. */
   5820 	if (txq->txq_fifo_stall)
   5821 		return 1;
   5822 
   5823 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5824 		/* Stall only occurs in half-duplex mode. */
   5825 		goto send_packet;
   5826 	}
   5827 
   5828 	if (len >= WM_82547_PAD_LEN + space) {
   5829 		txq->txq_fifo_stall = 1;
   5830 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5831 		return 1;
   5832 	}
   5833 
   5834  send_packet:
   5835 	txq->txq_fifo_head += len;
   5836 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5837 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5838 
   5839 	return 0;
   5840 }
   5841 
   5842 static int
   5843 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5844 {
   5845 	int error;
   5846 
   5847 	/*
   5848 	 * Allocate the control data structures, and create and load the
   5849 	 * DMA map for it.
   5850 	 *
   5851 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5852 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5853 	 * both sets within the same 4G segment.
   5854 	 */
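        	/*
        	 * The 4G constraint is enforced below by passing
        	 * (bus_size_t)0x100000000ULL as the boundary argument to
        	 * bus_dmamem_alloc().
        	 */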
   5855 	if (sc->sc_type < WM_T_82544)
   5856 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5857 	else
   5858 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5859 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5860 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5861 	else
   5862 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5863 
   5864 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5865 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5866 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5867 		aprint_error_dev(sc->sc_dev,
   5868 		    "unable to allocate TX control data, error = %d\n",
   5869 		    error);
   5870 		goto fail_0;
   5871 	}
   5872 
   5873 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5874 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5875 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5876 		aprint_error_dev(sc->sc_dev,
   5877 		    "unable to map TX control data, error = %d\n", error);
   5878 		goto fail_1;
   5879 	}
   5880 
   5881 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5882 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5883 		aprint_error_dev(sc->sc_dev,
   5884 		    "unable to create TX control data DMA map, error = %d\n",
   5885 		    error);
   5886 		goto fail_2;
   5887 	}
   5888 
   5889 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5890 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5891 		aprint_error_dev(sc->sc_dev,
   5892 		    "unable to load TX control data DMA map, error = %d\n",
   5893 		    error);
   5894 		goto fail_3;
   5895 	}
   5896 
   5897 	return 0;
   5898 
   5899  fail_3:
   5900 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5901  fail_2:
   5902 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5903 	    WM_TXDESCS_SIZE(txq));
   5904  fail_1:
   5905 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5906  fail_0:
   5907 	return error;
   5908 }
   5909 
   5910 static void
   5911 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5912 {
   5913 
   5914 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5915 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5916 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5917 	    WM_TXDESCS_SIZE(txq));
   5918 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5919 }
   5920 
   5921 static int
   5922 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5923 {
   5924 	int error;
   5925 	size_t rxq_descs_size;
   5926 
   5927 	/*
   5928 	 * Allocate the control data structures, and create and load the
   5929 	 * DMA map for it.
   5930 	 *
   5931 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5932 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5933 	 * both sets within the same 4G segment.
   5934 	 */
   5935 	rxq->rxq_ndesc = WM_NRXDESC;
   5936 	if (sc->sc_type == WM_T_82574)
   5937 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5938 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5939 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5940 	else
   5941 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5942 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5943 
   5944 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5945 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5946 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5947 		aprint_error_dev(sc->sc_dev,
   5948 		    "unable to allocate RX control data, error = %d\n",
   5949 		    error);
   5950 		goto fail_0;
   5951 	}
   5952 
   5953 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5954 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5955 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5956 		aprint_error_dev(sc->sc_dev,
   5957 		    "unable to map RX control data, error = %d\n", error);
   5958 		goto fail_1;
   5959 	}
   5960 
   5961 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5962 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5963 		aprint_error_dev(sc->sc_dev,
   5964 		    "unable to create RX control data DMA map, error = %d\n",
   5965 		    error);
   5966 		goto fail_2;
   5967 	}
   5968 
   5969 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5970 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5971 		aprint_error_dev(sc->sc_dev,
   5972 		    "unable to load RX control data DMA map, error = %d\n",
   5973 		    error);
   5974 		goto fail_3;
   5975 	}
   5976 
   5977 	return 0;
   5978 
   5979  fail_3:
   5980 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5981  fail_2:
   5982 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5983 	    rxq_descs_size);
   5984  fail_1:
   5985 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5986  fail_0:
   5987 	return error;
   5988 }
   5989 
   5990 static void
   5991 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5992 {
   5993 
   5994 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5995 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5996 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5997 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5998 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5999 }
   6000 
   6001 
   6002 static int
   6003 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6004 {
   6005 	int i, error;
   6006 
   6007 	/* Create the transmit buffer DMA maps. */
   6008 	WM_TXQUEUELEN(txq) =
   6009 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6010 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6011 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6012 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6013 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6014 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6015 			aprint_error_dev(sc->sc_dev,
   6016 			    "unable to create Tx DMA map %d, error = %d\n",
   6017 			    i, error);
   6018 			goto fail;
   6019 		}
   6020 	}
   6021 
   6022 	return 0;
   6023 
   6024  fail:
   6025 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6026 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6027 			bus_dmamap_destroy(sc->sc_dmat,
   6028 			    txq->txq_soft[i].txs_dmamap);
   6029 	}
   6030 	return error;
   6031 }
   6032 
   6033 static void
   6034 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6035 {
   6036 	int i;
   6037 
   6038 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6039 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6040 			bus_dmamap_destroy(sc->sc_dmat,
   6041 			    txq->txq_soft[i].txs_dmamap);
   6042 	}
   6043 }
   6044 
   6045 static int
   6046 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6047 {
   6048 	int i, error;
   6049 
   6050 	/* Create the receive buffer DMA maps. */
   6051 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6052 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6053 			    MCLBYTES, 0, 0,
   6054 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6055 			aprint_error_dev(sc->sc_dev,
   6056 			    "unable to create Rx DMA map %d error = %d\n",
   6057 			    i, error);
   6058 			goto fail;
   6059 		}
   6060 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6061 	}
   6062 
   6063 	return 0;
   6064 
   6065  fail:
   6066 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6067 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6068 			bus_dmamap_destroy(sc->sc_dmat,
   6069 			    rxq->rxq_soft[i].rxs_dmamap);
   6070 	}
   6071 	return error;
   6072 }
   6073 
   6074 static void
   6075 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6076 {
   6077 	int i;
   6078 
   6079 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6080 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6081 			bus_dmamap_destroy(sc->sc_dmat,
   6082 			    rxq->rxq_soft[i].rxs_dmamap);
   6083 	}
   6084 }
   6085 
   6086 /*
   6087  * wm_alloc_txrx_queues:
   6088  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6089  */
   6090 static int
   6091 wm_alloc_txrx_queues(struct wm_softc *sc)
   6092 {
   6093 	int i, error, tx_done, rx_done;
   6094 
   6095 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6096 	    KM_SLEEP);
   6097 	if (sc->sc_queue == NULL) {
   6098 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6099 		error = ENOMEM;
   6100 		goto fail_0;
   6101 	}
   6102 
   6103 	/*
   6104 	 * For transmission
   6105 	 */
   6106 	error = 0;
   6107 	tx_done = 0;
   6108 	for (i = 0; i < sc->sc_nqueues; i++) {
   6109 #ifdef WM_EVENT_COUNTERS
   6110 		int j;
   6111 		const char *xname;
   6112 #endif
   6113 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6114 		txq->txq_sc = sc;
   6115 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6116 
   6117 		error = wm_alloc_tx_descs(sc, txq);
   6118 		if (error)
   6119 			break;
   6120 		error = wm_alloc_tx_buffer(sc, txq);
   6121 		if (error) {
   6122 			wm_free_tx_descs(sc, txq);
   6123 			break;
   6124 		}
   6125 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6126 		if (txq->txq_interq == NULL) {
   6127 			wm_free_tx_descs(sc, txq);
   6128 			wm_free_tx_buffer(sc, txq);
   6129 			error = ENOMEM;
   6130 			break;
   6131 		}
   6132 
   6133 #ifdef WM_EVENT_COUNTERS
   6134 		xname = device_xname(sc->sc_dev);
   6135 
   6136 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6137 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6138 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6139 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6140 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6141 
   6142 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6143 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6144 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6145 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6146 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6147 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6148 
   6149 		for (j = 0; j < WM_NTXSEGS; j++) {
   6150 			snprintf(txq->txq_txseg_evcnt_names[j],
   6151 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6152 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6153 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6154 		}
   6155 
   6156 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6157 
   6158 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6159 #endif /* WM_EVENT_COUNTERS */
   6160 
   6161 		tx_done++;
   6162 	}
   6163 	if (error)
   6164 		goto fail_1;
   6165 
   6166 	/*
   6167 	 * For receive
   6168 	 */
   6169 	error = 0;
   6170 	rx_done = 0;
   6171 	for (i = 0; i < sc->sc_nqueues; i++) {
   6172 #ifdef WM_EVENT_COUNTERS
   6173 		const char *xname;
   6174 #endif
   6175 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6176 		rxq->rxq_sc = sc;
   6177 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6178 
   6179 		error = wm_alloc_rx_descs(sc, rxq);
   6180 		if (error)
   6181 			break;
   6182 
   6183 		error = wm_alloc_rx_buffer(sc, rxq);
   6184 		if (error) {
   6185 			wm_free_rx_descs(sc, rxq);
   6186 			break;
   6187 		}
   6188 
   6189 #ifdef WM_EVENT_COUNTERS
   6190 		xname = device_xname(sc->sc_dev);
   6191 
   6192 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6193 
   6194 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6195 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6196 #endif /* WM_EVENT_COUNTERS */
   6197 
   6198 		rx_done++;
   6199 	}
   6200 	if (error)
   6201 		goto fail_2;
   6202 
   6203 	return 0;
   6204 
   6205  fail_2:
   6206 	for (i = 0; i < rx_done; i++) {
   6207 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6208 		wm_free_rx_buffer(sc, rxq);
   6209 		wm_free_rx_descs(sc, rxq);
   6210 		if (rxq->rxq_lock)
   6211 			mutex_obj_free(rxq->rxq_lock);
   6212 	}
   6213  fail_1:
   6214 	for (i = 0; i < tx_done; i++) {
   6215 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6216 		pcq_destroy(txq->txq_interq);
   6217 		wm_free_tx_buffer(sc, txq);
   6218 		wm_free_tx_descs(sc, txq);
   6219 		if (txq->txq_lock)
   6220 			mutex_obj_free(txq->txq_lock);
   6221 	}
   6222 
   6223 	kmem_free(sc->sc_queue,
   6224 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6225  fail_0:
   6226 	return error;
   6227 }
   6228 
   6229 /*
   6230  * wm_free_txrx_queues:
   6231  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6232  */
   6233 static void
   6234 wm_free_txrx_queues(struct wm_softc *sc)
   6235 {
   6236 	int i;
   6237 
   6238 	for (i = 0; i < sc->sc_nqueues; i++) {
   6239 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6240 
   6241 #ifdef WM_EVENT_COUNTERS
   6242 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6243 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6244 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6245 #endif /* WM_EVENT_COUNTERS */
   6246 
   6247 		wm_free_rx_buffer(sc, rxq);
   6248 		wm_free_rx_descs(sc, rxq);
   6249 		if (rxq->rxq_lock)
   6250 			mutex_obj_free(rxq->rxq_lock);
   6251 	}
   6252 
   6253 	for (i = 0; i < sc->sc_nqueues; i++) {
   6254 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6255 		struct mbuf *m;
   6256 #ifdef WM_EVENT_COUNTERS
   6257 		int j;
   6258 
   6259 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6260 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6261 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6262 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6263 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6264 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6265 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6266 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6267 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6268 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6269 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6270 
   6271 		for (j = 0; j < WM_NTXSEGS; j++)
   6272 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6273 
   6274 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6275 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6276 #endif /* WM_EVENT_COUNTERS */
   6277 
   6278 		/* drain txq_interq */
   6279 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6280 			m_freem(m);
   6281 		pcq_destroy(txq->txq_interq);
   6282 
   6283 		wm_free_tx_buffer(sc, txq);
   6284 		wm_free_tx_descs(sc, txq);
   6285 		if (txq->txq_lock)
   6286 			mutex_obj_free(txq->txq_lock);
   6287 	}
   6288 
   6289 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6290 }
   6291 
   6292 static void
   6293 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6294 {
   6295 
   6296 	KASSERT(mutex_owned(txq->txq_lock));
   6297 
   6298 	/* Initialize the transmit descriptor ring. */
   6299 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6300 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6301 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6302 	txq->txq_free = WM_NTXDESC(txq);
   6303 	txq->txq_next = 0;
   6304 }
   6305 
   6306 static void
   6307 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6308     struct wm_txqueue *txq)
   6309 {
   6310 
   6311 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6312 		device_xname(sc->sc_dev), __func__));
   6313 	KASSERT(mutex_owned(txq->txq_lock));
   6314 
   6315 	if (sc->sc_type < WM_T_82543) {
   6316 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6317 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6318 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6319 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6320 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6321 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6322 	} else {
   6323 		int qid = wmq->wmq_id;
   6324 
   6325 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6326 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6327 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6328 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6329 
   6330 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6331 			/*
   6332 			 * Don't write TDT before TCTL.EN is set.
    6333 			 * See the datasheet.
   6334 			 */
   6335 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6336 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6337 			    | TXDCTL_WTHRESH(0));
   6338 		else {
   6339 			/* XXX should update with AIM? */
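         			/*
         			 * Note: wmq_itr is kept in ITR units of 256 ns,
         			 * while TIDV and TADV count 1.024 us units;
         			 * hence, presumably, the division by four.
         			 */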
   6340 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6341 			if (sc->sc_type >= WM_T_82540) {
    6342 				/* Should be the same value as TIDV */
   6343 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6344 			}
   6345 
   6346 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6347 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6348 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6349 		}
   6350 	}
   6351 }
   6352 
   6353 static void
   6354 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6355 {
   6356 	int i;
   6357 
   6358 	KASSERT(mutex_owned(txq->txq_lock));
   6359 
   6360 	/* Initialize the transmit job descriptors. */
   6361 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6362 		txq->txq_soft[i].txs_mbuf = NULL;
   6363 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6364 	txq->txq_snext = 0;
   6365 	txq->txq_sdirty = 0;
   6366 }
   6367 
   6368 static void
   6369 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6370     struct wm_txqueue *txq)
   6371 {
   6372 
   6373 	KASSERT(mutex_owned(txq->txq_lock));
   6374 
   6375 	/*
   6376 	 * Set up some register offsets that are different between
   6377 	 * the i82542 and the i82543 and later chips.
   6378 	 */
   6379 	if (sc->sc_type < WM_T_82543)
   6380 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6381 	else
   6382 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6383 
   6384 	wm_init_tx_descs(sc, txq);
   6385 	wm_init_tx_regs(sc, wmq, txq);
   6386 	wm_init_tx_buffer(sc, txq);
   6387 }
   6388 
   6389 static void
   6390 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6391     struct wm_rxqueue *rxq)
   6392 {
   6393 
   6394 	KASSERT(mutex_owned(rxq->rxq_lock));
   6395 
   6396 	/*
   6397 	 * Initialize the receive descriptor and receive job
   6398 	 * descriptor rings.
   6399 	 */
   6400 	if (sc->sc_type < WM_T_82543) {
   6401 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6402 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6403 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6404 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6405 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6406 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6407 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6408 
   6409 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6410 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6411 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6412 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6413 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6414 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6415 	} else {
   6416 		int qid = wmq->wmq_id;
   6417 
   6418 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6419 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6420 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6421 
   6422 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
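         			/*
         			 * SRRCTL expresses the packet buffer size in
         			 * units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
         			 * so MCLBYTES must be a multiple of that unit.
         			 */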
   6423 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6424 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6425 
   6426 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6427 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6428 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6429 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6430 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6431 			    | RXDCTL_WTHRESH(1));
   6432 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6433 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6434 		} else {
   6435 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6436 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6437 			/* XXX should update with AIM? */
   6438 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6439 			/* MUST be the same value as RDTR */
   6440 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6441 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6442 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6443 		}
   6444 	}
   6445 }
   6446 
   6447 static int
   6448 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6449 {
   6450 	struct wm_rxsoft *rxs;
   6451 	int error, i;
   6452 
   6453 	KASSERT(mutex_owned(rxq->rxq_lock));
   6454 
   6455 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6456 		rxs = &rxq->rxq_soft[i];
   6457 		if (rxs->rxs_mbuf == NULL) {
   6458 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6459 				log(LOG_ERR, "%s: unable to allocate or map "
   6460 				    "rx buffer %d, error = %d\n",
   6461 				    device_xname(sc->sc_dev), i, error);
   6462 				/*
   6463 				 * XXX Should attempt to run with fewer receive
   6464 				 * XXX buffers instead of just failing.
   6465 				 */
   6466 				wm_rxdrain(rxq);
   6467 				return ENOMEM;
   6468 			}
   6469 		} else {
   6470 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6471 				wm_init_rxdesc(rxq, i);
   6472 			/*
    6473 			 * For 82575 and newer devices, the RX descriptors
    6474 			 * must be initialized after RCTL.EN is set in
    6475 			 * wm_set_filter().
   6476 			 */
   6477 		}
   6478 	}
   6479 	rxq->rxq_ptr = 0;
   6480 	rxq->rxq_discard = 0;
   6481 	WM_RXCHAIN_RESET(rxq);
   6482 
   6483 	return 0;
   6484 }
   6485 
   6486 static int
   6487 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6488     struct wm_rxqueue *rxq)
   6489 {
   6490 
   6491 	KASSERT(mutex_owned(rxq->rxq_lock));
   6492 
   6493 	/*
   6494 	 * Set up some register offsets that are different between
   6495 	 * the i82542 and the i82543 and later chips.
   6496 	 */
   6497 	if (sc->sc_type < WM_T_82543)
   6498 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6499 	else
   6500 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6501 
   6502 	wm_init_rx_regs(sc, wmq, rxq);
   6503 	return wm_init_rx_buffer(sc, rxq);
   6504 }
   6505 
   6506 /*
    6507  * wm_init_txrx_queues:
   6508  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6509  */
   6510 static int
   6511 wm_init_txrx_queues(struct wm_softc *sc)
   6512 {
   6513 	int i, error = 0;
   6514 
   6515 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6516 		device_xname(sc->sc_dev), __func__));
   6517 
   6518 	for (i = 0; i < sc->sc_nqueues; i++) {
   6519 		struct wm_queue *wmq = &sc->sc_queue[i];
   6520 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6521 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6522 
   6523 		/*
   6524 		 * TODO
    6525 		 * Currently, a constant value is used instead of AIM.
    6526 		 * Furthermore, the interrupt interval of a multiqueue
    6527 		 * configuration, which uses polling mode, is lower than the
    6528 		 * default value.  More tuning, and AIM, are required.
   6529 		 */
   6530 		if (wm_is_using_multiqueue(sc))
   6531 			wmq->wmq_itr = 50;
   6532 		else
   6533 			wmq->wmq_itr = sc->sc_itr_init;
   6534 		wmq->wmq_set_itr = true;
   6535 
   6536 		mutex_enter(txq->txq_lock);
   6537 		wm_init_tx_queue(sc, wmq, txq);
   6538 		mutex_exit(txq->txq_lock);
   6539 
   6540 		mutex_enter(rxq->rxq_lock);
   6541 		error = wm_init_rx_queue(sc, wmq, rxq);
   6542 		mutex_exit(rxq->rxq_lock);
   6543 		if (error)
   6544 			break;
   6545 	}
   6546 
   6547 	return error;
   6548 }
   6549 
   6550 /*
   6551  * wm_tx_offload:
   6552  *
   6553  *	Set up TCP/IP checksumming parameters for the
   6554  *	specified packet.
   6555  */
   6556 static int
   6557 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6558     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6559 {
   6560 	struct mbuf *m0 = txs->txs_mbuf;
   6561 	struct livengood_tcpip_ctxdesc *t;
   6562 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6563 	uint32_t ipcse;
   6564 	struct ether_header *eh;
   6565 	int offset, iphl;
   6566 	uint8_t fields;
   6567 
   6568 	/*
   6569 	 * XXX It would be nice if the mbuf pkthdr had offset
   6570 	 * fields for the protocol headers.
   6571 	 */
   6572 
   6573 	eh = mtod(m0, struct ether_header *);
   6574 	switch (htons(eh->ether_type)) {
   6575 	case ETHERTYPE_IP:
   6576 	case ETHERTYPE_IPV6:
   6577 		offset = ETHER_HDR_LEN;
   6578 		break;
   6579 
   6580 	case ETHERTYPE_VLAN:
   6581 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6582 		break;
   6583 
   6584 	default:
   6585 		/*
   6586 		 * Don't support this protocol or encapsulation.
   6587 		 */
   6588 		*fieldsp = 0;
   6589 		*cmdp = 0;
   6590 		return 0;
   6591 	}
   6592 
   6593 	if ((m0->m_pkthdr.csum_flags &
   6594 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6595 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6596 	} else {
   6597 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6598 	}
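         	/* IPCSE: inclusive offset of the last byte covered by the IP checksum. */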
   6599 	ipcse = offset + iphl - 1;
   6600 
   6601 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6602 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6603 	seg = 0;
   6604 	fields = 0;
   6605 
   6606 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6607 		int hlen = offset + iphl;
   6608 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6609 
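         		/*
         		 * For TSO, zero the IP length field and seed th_sum with
         		 * the pseudo-header checksum computed without a length;
         		 * the hardware is expected to fill in both for each
         		 * generated segment.
         		 */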
   6610 		if (__predict_false(m0->m_len <
   6611 				    (hlen + sizeof(struct tcphdr)))) {
   6612 			/*
   6613 			 * TCP/IP headers are not in the first mbuf; we need
   6614 			 * to do this the slow and painful way.  Let's just
   6615 			 * hope this doesn't happen very often.
   6616 			 */
   6617 			struct tcphdr th;
   6618 
   6619 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6620 
   6621 			m_copydata(m0, hlen, sizeof(th), &th);
   6622 			if (v4) {
   6623 				struct ip ip;
   6624 
   6625 				m_copydata(m0, offset, sizeof(ip), &ip);
   6626 				ip.ip_len = 0;
   6627 				m_copyback(m0,
   6628 				    offset + offsetof(struct ip, ip_len),
   6629 				    sizeof(ip.ip_len), &ip.ip_len);
   6630 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6631 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6632 			} else {
   6633 				struct ip6_hdr ip6;
   6634 
   6635 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6636 				ip6.ip6_plen = 0;
   6637 				m_copyback(m0,
   6638 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6639 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6640 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6641 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6642 			}
   6643 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6644 			    sizeof(th.th_sum), &th.th_sum);
   6645 
   6646 			hlen += th.th_off << 2;
   6647 		} else {
   6648 			/*
   6649 			 * TCP/IP headers are in the first mbuf; we can do
   6650 			 * this the easy way.
   6651 			 */
   6652 			struct tcphdr *th;
   6653 
   6654 			if (v4) {
   6655 				struct ip *ip =
   6656 				    (void *)(mtod(m0, char *) + offset);
   6657 				th = (void *)(mtod(m0, char *) + hlen);
   6658 
   6659 				ip->ip_len = 0;
   6660 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6661 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6662 			} else {
   6663 				struct ip6_hdr *ip6 =
   6664 				    (void *)(mtod(m0, char *) + offset);
   6665 				th = (void *)(mtod(m0, char *) + hlen);
   6666 
   6667 				ip6->ip6_plen = 0;
   6668 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6669 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6670 			}
   6671 			hlen += th->th_off << 2;
   6672 		}
   6673 
   6674 		if (v4) {
   6675 			WM_Q_EVCNT_INCR(txq, txtso);
   6676 			cmdlen |= WTX_TCPIP_CMD_IP;
   6677 		} else {
   6678 			WM_Q_EVCNT_INCR(txq, txtso6);
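         			/* IPv6 has no IP-header checksum, so clear IPCSE. */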
   6679 			ipcse = 0;
   6680 		}
   6681 		cmd |= WTX_TCPIP_CMD_TSE;
   6682 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6683 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6684 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6685 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6686 	}
   6687 
   6688 	/*
   6689 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6690 	 * offload feature, if we load the context descriptor, we
   6691 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6692 	 */
   6693 
   6694 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6695 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6696 	    WTX_TCPIP_IPCSE(ipcse);
   6697 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6698 		WM_Q_EVCNT_INCR(txq, txipsum);
   6699 		fields |= WTX_IXSM;
   6700 	}
   6701 
   6702 	offset += iphl;
   6703 
   6704 	if (m0->m_pkthdr.csum_flags &
   6705 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6706 		WM_Q_EVCNT_INCR(txq, txtusum);
   6707 		fields |= WTX_TXSM;
   6708 		tucs = WTX_TCPIP_TUCSS(offset) |
   6709 		    WTX_TCPIP_TUCSO(offset +
   6710 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6711 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6712 	} else if ((m0->m_pkthdr.csum_flags &
   6713 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6714 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6715 		fields |= WTX_TXSM;
   6716 		tucs = WTX_TCPIP_TUCSS(offset) |
   6717 		    WTX_TCPIP_TUCSO(offset +
   6718 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6719 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6720 	} else {
   6721 		/* Just initialize it to a valid TCP context. */
   6722 		tucs = WTX_TCPIP_TUCSS(offset) |
   6723 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6724 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6725 	}
   6726 
   6727 	/*
    6728 	 * We don't have to write a context descriptor for every packet,
    6729 	 * except on the 82574: when the 82574 uses two descriptor queues,
    6730 	 * a context descriptor must be written for every packet.
    6731 	 * Writing a context descriptor for every packet adds overhead,
    6732 	 * but it does not cause problems.
   6733 	 */
   6734 	/* Fill in the context descriptor. */
   6735 	t = (struct livengood_tcpip_ctxdesc *)
   6736 	    &txq->txq_descs[txq->txq_next];
   6737 	t->tcpip_ipcs = htole32(ipcs);
   6738 	t->tcpip_tucs = htole32(tucs);
   6739 	t->tcpip_cmdlen = htole32(cmdlen);
   6740 	t->tcpip_seg = htole32(seg);
   6741 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6742 
   6743 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6744 	txs->txs_ndesc++;
   6745 
   6746 	*cmdp = cmd;
   6747 	*fieldsp = fields;
   6748 
   6749 	return 0;
   6750 }
   6751 
   6752 static inline int
   6753 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6754 {
   6755 	struct wm_softc *sc = ifp->if_softc;
   6756 	u_int cpuid = cpu_index(curcpu());
   6757 
   6758 	/*
    6759 	 * Currently, a simple distribution strategy based on the CPU index.
    6760 	 * TODO:
    6761 	 * Distribute by flowid (RSS hash value).
   6762 	 */
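         	/*
         	 * Note: adding ncpu keeps the unsigned dividend from
         	 * wrapping when cpuid < sc_affinity_offset.
         	 */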
    6763 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6764 }
   6765 
   6766 /*
   6767  * wm_start:		[ifnet interface function]
   6768  *
   6769  *	Start packet transmission on the interface.
   6770  */
   6771 static void
   6772 wm_start(struct ifnet *ifp)
   6773 {
   6774 	struct wm_softc *sc = ifp->if_softc;
   6775 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6776 
   6777 #ifdef WM_MPSAFE
   6778 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6779 #endif
   6780 	/*
   6781 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6782 	 */
   6783 
   6784 	mutex_enter(txq->txq_lock);
   6785 	if (!txq->txq_stopping)
   6786 		wm_start_locked(ifp);
   6787 	mutex_exit(txq->txq_lock);
   6788 }
   6789 
   6790 static void
   6791 wm_start_locked(struct ifnet *ifp)
   6792 {
   6793 	struct wm_softc *sc = ifp->if_softc;
   6794 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6795 
   6796 	wm_send_common_locked(ifp, txq, false);
   6797 }
   6798 
   6799 static int
   6800 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6801 {
   6802 	int qid;
   6803 	struct wm_softc *sc = ifp->if_softc;
   6804 	struct wm_txqueue *txq;
   6805 
   6806 	qid = wm_select_txqueue(ifp, m);
   6807 	txq = &sc->sc_queue[qid].wmq_txq;
   6808 
   6809 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6810 		m_freem(m);
   6811 		WM_Q_EVCNT_INCR(txq, txdrop);
   6812 		return ENOBUFS;
   6813 	}
   6814 
   6815 	/*
   6816 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6817 	 */
   6818 	ifp->if_obytes += m->m_pkthdr.len;
   6819 	if (m->m_flags & M_MCAST)
   6820 		ifp->if_omcasts++;
   6821 
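         	/*
         	 * A failed mutex_tryenter() here is harmless; see the
         	 * explanation in wm_nq_transmit() below.
         	 */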
   6822 	if (mutex_tryenter(txq->txq_lock)) {
   6823 		if (!txq->txq_stopping)
   6824 			wm_transmit_locked(ifp, txq);
   6825 		mutex_exit(txq->txq_lock);
   6826 	}
   6827 
   6828 	return 0;
   6829 }
   6830 
   6831 static void
   6832 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6833 {
   6834 
   6835 	wm_send_common_locked(ifp, txq, true);
   6836 }
   6837 
   6838 static void
   6839 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6840     bool is_transmit)
   6841 {
   6842 	struct wm_softc *sc = ifp->if_softc;
   6843 	struct mbuf *m0;
   6844 	struct m_tag *mtag;
   6845 	struct wm_txsoft *txs;
   6846 	bus_dmamap_t dmamap;
   6847 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6848 	bus_addr_t curaddr;
   6849 	bus_size_t seglen, curlen;
   6850 	uint32_t cksumcmd;
   6851 	uint8_t cksumfields;
   6852 
   6853 	KASSERT(mutex_owned(txq->txq_lock));
   6854 
   6855 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6856 		return;
   6857 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6858 		return;
   6859 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6860 		return;
   6861 
   6862 	/* Remember the previous number of free descriptors. */
   6863 	ofree = txq->txq_free;
   6864 
   6865 	/*
   6866 	 * Loop through the send queue, setting up transmit descriptors
   6867 	 * until we drain the queue, or use up all available transmit
   6868 	 * descriptors.
   6869 	 */
   6870 	for (;;) {
   6871 		m0 = NULL;
   6872 
   6873 		/* Get a work queue entry. */
   6874 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6875 			wm_txeof(sc, txq);
   6876 			if (txq->txq_sfree == 0) {
   6877 				DPRINTF(WM_DEBUG_TX,
   6878 				    ("%s: TX: no free job descriptors\n",
   6879 					device_xname(sc->sc_dev)));
   6880 				WM_Q_EVCNT_INCR(txq, txsstall);
   6881 				break;
   6882 			}
   6883 		}
   6884 
   6885 		/* Grab a packet off the queue. */
   6886 		if (is_transmit)
   6887 			m0 = pcq_get(txq->txq_interq);
   6888 		else
   6889 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6890 		if (m0 == NULL)
   6891 			break;
   6892 
   6893 		DPRINTF(WM_DEBUG_TX,
   6894 		    ("%s: TX: have packet to transmit: %p\n",
   6895 		    device_xname(sc->sc_dev), m0));
   6896 
   6897 		txs = &txq->txq_soft[txq->txq_snext];
   6898 		dmamap = txs->txs_dmamap;
   6899 
   6900 		use_tso = (m0->m_pkthdr.csum_flags &
   6901 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6902 
   6903 		/*
   6904 		 * So says the Linux driver:
   6905 		 * The controller does a simple calculation to make sure
   6906 		 * there is enough room in the FIFO before initiating the
   6907 		 * DMA for each buffer.  The calc is:
   6908 		 *	4 = ceil(buffer len / MSS)
   6909 		 * To make sure we don't overrun the FIFO, adjust the max
   6910 		 * buffer len if the MSS drops.
   6911 		 */
   6912 		dmamap->dm_maxsegsz =
   6913 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6914 		    ? m0->m_pkthdr.segsz << 2
   6915 		    : WTX_MAX_LEN;
   6916 
   6917 		/*
   6918 		 * Load the DMA map.  If this fails, the packet either
   6919 		 * didn't fit in the allotted number of segments, or we
   6920 		 * were short on resources.  For the too-many-segments
   6921 		 * case, we simply report an error and drop the packet,
   6922 		 * since we can't sanely copy a jumbo packet to a single
   6923 		 * buffer.
   6924 		 */
   6925 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6926 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6927 		if (error) {
   6928 			if (error == EFBIG) {
   6929 				WM_Q_EVCNT_INCR(txq, txdrop);
   6930 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6931 				    "DMA segments, dropping...\n",
   6932 				    device_xname(sc->sc_dev));
   6933 				wm_dump_mbuf_chain(sc, m0);
   6934 				m_freem(m0);
   6935 				continue;
   6936 			}
    6937 			/* Short on resources, just stop for now. */
   6938 			DPRINTF(WM_DEBUG_TX,
   6939 			    ("%s: TX: dmamap load failed: %d\n",
   6940 			    device_xname(sc->sc_dev), error));
   6941 			break;
   6942 		}
   6943 
   6944 		segs_needed = dmamap->dm_nsegs;
   6945 		if (use_tso) {
   6946 			/* For sentinel descriptor; see below. */
   6947 			segs_needed++;
   6948 		}
   6949 
   6950 		/*
   6951 		 * Ensure we have enough descriptors free to describe
   6952 		 * the packet.  Note, we always reserve one descriptor
   6953 		 * at the end of the ring due to the semantics of the
   6954 		 * TDT register, plus one more in the event we need
   6955 		 * to load offload context.
   6956 		 */
   6957 		if (segs_needed > txq->txq_free - 2) {
   6958 			/*
   6959 			 * Not enough free descriptors to transmit this
   6960 			 * packet.  We haven't committed anything yet,
   6961 			 * so just unload the DMA map, put the packet
    6962 			 * back on the queue, and punt.  Notify the upper
   6963 			 * layer that there are no more slots left.
   6964 			 */
   6965 			DPRINTF(WM_DEBUG_TX,
   6966 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6967 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6968 			    segs_needed, txq->txq_free - 1));
   6969 			if (!is_transmit)
   6970 				ifp->if_flags |= IFF_OACTIVE;
   6971 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6972 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6973 			WM_Q_EVCNT_INCR(txq, txdstall);
   6974 			break;
   6975 		}
   6976 
   6977 		/*
   6978 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6979 		 * once we know we can transmit the packet, since we
   6980 		 * do some internal FIFO space accounting here.
   6981 		 */
   6982 		if (sc->sc_type == WM_T_82547 &&
   6983 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6984 			DPRINTF(WM_DEBUG_TX,
   6985 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6986 			    device_xname(sc->sc_dev)));
   6987 			if (!is_transmit)
   6988 				ifp->if_flags |= IFF_OACTIVE;
   6989 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6990 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6991 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6992 			break;
   6993 		}
   6994 
   6995 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6996 
   6997 		DPRINTF(WM_DEBUG_TX,
   6998 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6999 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7000 
   7001 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7002 
   7003 		/*
   7004 		 * Store a pointer to the packet so that we can free it
   7005 		 * later.
   7006 		 *
   7007 		 * Initially, we consider the number of descriptors the
    7008 		 * packet uses to be the number of DMA segments.  This may be
   7009 		 * incremented by 1 if we do checksum offload (a descriptor
   7010 		 * is used to set the checksum context).
   7011 		 */
   7012 		txs->txs_mbuf = m0;
   7013 		txs->txs_firstdesc = txq->txq_next;
   7014 		txs->txs_ndesc = segs_needed;
   7015 
   7016 		/* Set up offload parameters for this packet. */
   7017 		if (m0->m_pkthdr.csum_flags &
   7018 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7019 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7020 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7021 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7022 					  &cksumfields) != 0) {
   7023 				/* Error message already displayed. */
   7024 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7025 				continue;
   7026 			}
   7027 		} else {
   7028 			cksumcmd = 0;
   7029 			cksumfields = 0;
   7030 		}
   7031 
   7032 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7033 
   7034 		/* Sync the DMA map. */
   7035 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7036 		    BUS_DMASYNC_PREWRITE);
   7037 
   7038 		/* Initialize the transmit descriptor. */
   7039 		for (nexttx = txq->txq_next, seg = 0;
   7040 		     seg < dmamap->dm_nsegs; seg++) {
   7041 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7042 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7043 			     seglen != 0;
   7044 			     curaddr += curlen, seglen -= curlen,
   7045 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7046 				curlen = seglen;
   7047 
   7048 				/*
   7049 				 * So says the Linux driver:
   7050 				 * Work around for premature descriptor
   7051 				 * write-backs in TSO mode.  Append a
   7052 				 * 4-byte sentinel descriptor.
   7053 				 */
   7054 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7055 				    curlen > 8)
   7056 					curlen -= 4;
   7057 
   7058 				wm_set_dma_addr(
   7059 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7060 				txq->txq_descs[nexttx].wtx_cmdlen
   7061 				    = htole32(cksumcmd | curlen);
   7062 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7063 				    = 0;
   7064 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7065 				    = cksumfields;
    7066 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7067 				lasttx = nexttx;
   7068 
   7069 				DPRINTF(WM_DEBUG_TX,
   7070 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7071 				     "len %#04zx\n",
   7072 				    device_xname(sc->sc_dev), nexttx,
   7073 				    (uint64_t)curaddr, curlen));
   7074 			}
   7075 		}
   7076 
   7077 		KASSERT(lasttx != -1);
   7078 
   7079 		/*
   7080 		 * Set up the command byte on the last descriptor of
   7081 		 * the packet.  If we're in the interrupt delay window,
   7082 		 * delay the interrupt.
   7083 		 */
   7084 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7085 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7086 
   7087 		/*
   7088 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7089 		 * up the descriptor to encapsulate the packet for us.
   7090 		 *
   7091 		 * This is only valid on the last descriptor of the packet.
   7092 		 */
   7093 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7094 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7095 			    htole32(WTX_CMD_VLE);
   7096 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7097 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7098 		}
   7099 
   7100 		txs->txs_lastdesc = lasttx;
   7101 
   7102 		DPRINTF(WM_DEBUG_TX,
   7103 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7104 		    device_xname(sc->sc_dev),
   7105 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7106 
   7107 		/* Sync the descriptors we're using. */
   7108 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7109 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7110 
   7111 		/* Give the packet to the chip. */
   7112 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7113 
   7114 		DPRINTF(WM_DEBUG_TX,
   7115 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7116 
   7117 		DPRINTF(WM_DEBUG_TX,
   7118 		    ("%s: TX: finished transmitting packet, job %d\n",
   7119 		    device_xname(sc->sc_dev), txq->txq_snext));
   7120 
   7121 		/* Advance the tx pointer. */
   7122 		txq->txq_free -= txs->txs_ndesc;
   7123 		txq->txq_next = nexttx;
   7124 
   7125 		txq->txq_sfree--;
   7126 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7127 
   7128 		/* Pass the packet to any BPF listeners. */
   7129 		bpf_mtap(ifp, m0);
   7130 	}
   7131 
   7132 	if (m0 != NULL) {
   7133 		if (!is_transmit)
   7134 			ifp->if_flags |= IFF_OACTIVE;
   7135 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7136 		WM_Q_EVCNT_INCR(txq, txdrop);
   7137 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7138 			__func__));
   7139 		m_freem(m0);
   7140 	}
   7141 
   7142 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7143 		/* No more slots; notify upper layer. */
   7144 		if (!is_transmit)
   7145 			ifp->if_flags |= IFF_OACTIVE;
   7146 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7147 	}
   7148 
   7149 	if (txq->txq_free != ofree) {
   7150 		/* Set a watchdog timer in case the chip flakes out. */
   7151 		ifp->if_timer = 5;
   7152 	}
   7153 }
   7154 
   7155 /*
   7156  * wm_nq_tx_offload:
   7157  *
   7158  *	Set up TCP/IP checksumming parameters for the
   7159  *	specified packet, for NEWQUEUE devices
   7160  */
   7161 static int
   7162 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7163     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7164 {
   7165 	struct mbuf *m0 = txs->txs_mbuf;
   7166 	struct m_tag *mtag;
   7167 	uint32_t vl_len, mssidx, cmdc;
   7168 	struct ether_header *eh;
   7169 	int offset, iphl;
   7170 
   7171 	/*
   7172 	 * XXX It would be nice if the mbuf pkthdr had offset
   7173 	 * fields for the protocol headers.
   7174 	 */
   7175 	*cmdlenp = 0;
   7176 	*fieldsp = 0;
   7177 
   7178 	eh = mtod(m0, struct ether_header *);
   7179 	switch (htons(eh->ether_type)) {
   7180 	case ETHERTYPE_IP:
   7181 	case ETHERTYPE_IPV6:
   7182 		offset = ETHER_HDR_LEN;
   7183 		break;
   7184 
   7185 	case ETHERTYPE_VLAN:
   7186 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7187 		break;
   7188 
   7189 	default:
   7190 		/* Don't support this protocol or encapsulation. */
   7191 		*do_csum = false;
   7192 		return 0;
   7193 	}
   7194 	*do_csum = true;
   7195 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7196 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7197 
   7198 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7199 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7200 
   7201 	if ((m0->m_pkthdr.csum_flags &
   7202 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7203 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7204 	} else {
   7205 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7206 	}
   7207 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7208 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7209 
   7210 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7211 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7212 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7213 		*cmdlenp |= NQTX_CMD_VLE;
   7214 	}
   7215 
   7216 	mssidx = 0;
   7217 
   7218 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7219 		int hlen = offset + iphl;
   7220 		int tcp_hlen;
   7221 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7222 
   7223 		if (__predict_false(m0->m_len <
   7224 				    (hlen + sizeof(struct tcphdr)))) {
   7225 			/*
   7226 			 * TCP/IP headers are not in the first mbuf; we need
   7227 			 * to do this the slow and painful way.  Let's just
   7228 			 * hope this doesn't happen very often.
   7229 			 */
   7230 			struct tcphdr th;
   7231 
   7232 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7233 
   7234 			m_copydata(m0, hlen, sizeof(th), &th);
   7235 			if (v4) {
   7236 				struct ip ip;
   7237 
   7238 				m_copydata(m0, offset, sizeof(ip), &ip);
   7239 				ip.ip_len = 0;
   7240 				m_copyback(m0,
   7241 				    offset + offsetof(struct ip, ip_len),
   7242 				    sizeof(ip.ip_len), &ip.ip_len);
   7243 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7244 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7245 			} else {
   7246 				struct ip6_hdr ip6;
   7247 
   7248 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7249 				ip6.ip6_plen = 0;
   7250 				m_copyback(m0,
   7251 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7252 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7253 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7254 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7255 			}
   7256 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7257 			    sizeof(th.th_sum), &th.th_sum);
   7258 
   7259 			tcp_hlen = th.th_off << 2;
   7260 		} else {
   7261 			/*
   7262 			 * TCP/IP headers are in the first mbuf; we can do
   7263 			 * this the easy way.
   7264 			 */
   7265 			struct tcphdr *th;
   7266 
   7267 			if (v4) {
   7268 				struct ip *ip =
   7269 				    (void *)(mtod(m0, char *) + offset);
   7270 				th = (void *)(mtod(m0, char *) + hlen);
   7271 
   7272 				ip->ip_len = 0;
   7273 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7274 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7275 			} else {
   7276 				struct ip6_hdr *ip6 =
   7277 				    (void *)(mtod(m0, char *) + offset);
   7278 				th = (void *)(mtod(m0, char *) + hlen);
   7279 
   7280 				ip6->ip6_plen = 0;
   7281 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7282 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7283 			}
   7284 			tcp_hlen = th->th_off << 2;
   7285 		}
   7286 		hlen += tcp_hlen;
   7287 		*cmdlenp |= NQTX_CMD_TSE;
   7288 
   7289 		if (v4) {
   7290 			WM_Q_EVCNT_INCR(txq, txtso);
   7291 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7292 		} else {
   7293 			WM_Q_EVCNT_INCR(txq, txtso6);
   7294 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7295 		}
   7296 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7297 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7298 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7299 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7300 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7301 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7302 	} else {
   7303 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7304 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7305 	}
   7306 
   7307 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7308 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7309 		cmdc |= NQTXC_CMD_IP4;
   7310 	}
   7311 
   7312 	if (m0->m_pkthdr.csum_flags &
   7313 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7314 		WM_Q_EVCNT_INCR(txq, txtusum);
   7315 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7316 			cmdc |= NQTXC_CMD_TCP;
   7317 		} else {
   7318 			cmdc |= NQTXC_CMD_UDP;
   7319 		}
   7320 		cmdc |= NQTXC_CMD_IP4;
   7321 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7322 	}
   7323 	if (m0->m_pkthdr.csum_flags &
   7324 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7325 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7326 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7327 			cmdc |= NQTXC_CMD_TCP;
   7328 		} else {
   7329 			cmdc |= NQTXC_CMD_UDP;
   7330 		}
   7331 		cmdc |= NQTXC_CMD_IP6;
   7332 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7333 	}
   7334 
   7335 	/*
    7336 	 * We don't have to write a context descriptor for every packet on
    7337 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7338 	 * I354, I210 and I211.  Writing once per Tx queue is enough for
    7339 	 * these controllers.
    7340 	 * Writing a context descriptor for every packet adds overhead,
    7341 	 * but it does not cause problems.
   7342 	 */
   7343 	/* Fill in the context descriptor. */
   7344 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7345 	    htole32(vl_len);
   7346 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7347 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7348 	    htole32(cmdc);
   7349 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7350 	    htole32(mssidx);
   7351 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7352 	DPRINTF(WM_DEBUG_TX,
   7353 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7354 	    txq->txq_next, 0, vl_len));
   7355 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7356 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7357 	txs->txs_ndesc++;
   7358 	return 0;
   7359 }
   7360 
   7361 /*
   7362  * wm_nq_start:		[ifnet interface function]
   7363  *
   7364  *	Start packet transmission on the interface for NEWQUEUE devices
   7365  */
   7366 static void
   7367 wm_nq_start(struct ifnet *ifp)
   7368 {
   7369 	struct wm_softc *sc = ifp->if_softc;
   7370 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7371 
   7372 #ifdef WM_MPSAFE
   7373 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7374 #endif
   7375 	/*
   7376 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7377 	 */
   7378 
   7379 	mutex_enter(txq->txq_lock);
   7380 	if (!txq->txq_stopping)
   7381 		wm_nq_start_locked(ifp);
   7382 	mutex_exit(txq->txq_lock);
   7383 }
   7384 
   7385 static void
   7386 wm_nq_start_locked(struct ifnet *ifp)
   7387 {
   7388 	struct wm_softc *sc = ifp->if_softc;
   7389 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7390 
   7391 	wm_nq_send_common_locked(ifp, txq, false);
   7392 }
   7393 
   7394 static int
   7395 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7396 {
   7397 	int qid;
   7398 	struct wm_softc *sc = ifp->if_softc;
   7399 	struct wm_txqueue *txq;
   7400 
   7401 	qid = wm_select_txqueue(ifp, m);
   7402 	txq = &sc->sc_queue[qid].wmq_txq;
   7403 
   7404 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7405 		m_freem(m);
   7406 		WM_Q_EVCNT_INCR(txq, txdrop);
   7407 		return ENOBUFS;
   7408 	}
   7409 
   7410 	/*
   7411 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7412 	 */
   7413 	ifp->if_obytes += m->m_pkthdr.len;
   7414 	if (m->m_flags & M_MCAST)
   7415 		ifp->if_omcasts++;
   7416 
   7417 	/*
    7418 	 * This mutex_tryenter() can fail at run time in two situations:
    7419 	 *     (1) contention with the interrupt handler
    7420 	 *         (wm_txrxintr_msix())
    7421 	 *     (2) contention with the deferred if_start softint
    7422 	 *         (wm_handle_queue())
    7423 	 * In either case, the last packet enqueued to txq->txq_interq is
    7424 	 * eventually dequeued by wm_deferred_start_locked(), so it does
    7425 	 * not get stuck.
   7426 	 */
   7427 	if (mutex_tryenter(txq->txq_lock)) {
   7428 		if (!txq->txq_stopping)
   7429 			wm_nq_transmit_locked(ifp, txq);
   7430 		mutex_exit(txq->txq_lock);
   7431 	}
   7432 
   7433 	return 0;
   7434 }
   7435 
   7436 static void
   7437 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7438 {
   7439 
   7440 	wm_nq_send_common_locked(ifp, txq, true);
   7441 }
   7442 
   7443 static void
   7444 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7445     bool is_transmit)
   7446 {
   7447 	struct wm_softc *sc = ifp->if_softc;
   7448 	struct mbuf *m0;
   7449 	struct m_tag *mtag;
   7450 	struct wm_txsoft *txs;
   7451 	bus_dmamap_t dmamap;
   7452 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7453 	bool do_csum, sent;
   7454 
   7455 	KASSERT(mutex_owned(txq->txq_lock));
   7456 
   7457 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7458 		return;
   7459 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7460 		return;
   7461 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7462 		return;
   7463 
   7464 	sent = false;
   7465 
   7466 	/*
   7467 	 * Loop through the send queue, setting up transmit descriptors
   7468 	 * until we drain the queue, or use up all available transmit
   7469 	 * descriptors.
   7470 	 */
   7471 	for (;;) {
   7472 		m0 = NULL;
   7473 
   7474 		/* Get a work queue entry. */
   7475 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7476 			wm_txeof(sc, txq);
   7477 			if (txq->txq_sfree == 0) {
   7478 				DPRINTF(WM_DEBUG_TX,
   7479 				    ("%s: TX: no free job descriptors\n",
   7480 					device_xname(sc->sc_dev)));
   7481 				WM_Q_EVCNT_INCR(txq, txsstall);
   7482 				break;
   7483 			}
   7484 		}
   7485 
   7486 		/* Grab a packet off the queue. */
   7487 		if (is_transmit)
   7488 			m0 = pcq_get(txq->txq_interq);
   7489 		else
   7490 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7491 		if (m0 == NULL)
   7492 			break;
   7493 
   7494 		DPRINTF(WM_DEBUG_TX,
   7495 		    ("%s: TX: have packet to transmit: %p\n",
   7496 		    device_xname(sc->sc_dev), m0));
   7497 
   7498 		txs = &txq->txq_soft[txq->txq_snext];
   7499 		dmamap = txs->txs_dmamap;
   7500 
   7501 		/*
   7502 		 * Load the DMA map.  If this fails, the packet either
   7503 		 * didn't fit in the allotted number of segments, or we
   7504 		 * were short on resources.  For the too-many-segments
   7505 		 * case, we simply report an error and drop the packet,
   7506 		 * since we can't sanely copy a jumbo packet to a single
   7507 		 * buffer.
   7508 		 */
   7509 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7510 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7511 		if (error) {
   7512 			if (error == EFBIG) {
   7513 				WM_Q_EVCNT_INCR(txq, txdrop);
   7514 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7515 				    "DMA segments, dropping...\n",
   7516 				    device_xname(sc->sc_dev));
   7517 				wm_dump_mbuf_chain(sc, m0);
   7518 				m_freem(m0);
   7519 				continue;
   7520 			}
   7521 			/* Short on resources, just stop for now. */
   7522 			DPRINTF(WM_DEBUG_TX,
   7523 			    ("%s: TX: dmamap load failed: %d\n",
   7524 			    device_xname(sc->sc_dev), error));
   7525 			break;
   7526 		}
   7527 
   7528 		segs_needed = dmamap->dm_nsegs;
   7529 
   7530 		/*
   7531 		 * Ensure we have enough descriptors free to describe
   7532 		 * the packet.  Note, we always reserve one descriptor
   7533 		 * at the end of the ring due to the semantics of the
   7534 		 * TDT register, plus one more in the event we need
   7535 		 * to load offload context.
   7536 		 */
   7537 		if (segs_needed > txq->txq_free - 2) {
   7538 			/*
   7539 			 * Not enough free descriptors to transmit this
   7540 			 * packet.  We haven't committed anything yet,
   7541 			 * so just unload the DMA map, put the packet
    7542 			 * back on the queue, and punt.  Notify the upper
   7543 			 * layer that there are no more slots left.
   7544 			 */
   7545 			DPRINTF(WM_DEBUG_TX,
   7546 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7547 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7548 			    segs_needed, txq->txq_free - 1));
   7549 			if (!is_transmit)
   7550 				ifp->if_flags |= IFF_OACTIVE;
   7551 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7552 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7553 			WM_Q_EVCNT_INCR(txq, txdstall);
   7554 			break;
   7555 		}
   7556 
   7557 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7558 
   7559 		DPRINTF(WM_DEBUG_TX,
   7560 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7561 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7562 
   7563 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7564 
   7565 		/*
   7566 		 * Store a pointer to the packet so that we can free it
   7567 		 * later.
   7568 		 *
   7569 		 * Initially, we consider the number of descriptors the
    7570 		 * packet uses to be the number of DMA segments.  This may be
   7571 		 * incremented by 1 if we do checksum offload (a descriptor
   7572 		 * is used to set the checksum context).
   7573 		 */
   7574 		txs->txs_mbuf = m0;
   7575 		txs->txs_firstdesc = txq->txq_next;
   7576 		txs->txs_ndesc = segs_needed;
   7577 
   7578 		/* Set up offload parameters for this packet. */
   7579 		uint32_t cmdlen, fields, dcmdlen;
   7580 		if (m0->m_pkthdr.csum_flags &
   7581 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7582 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7583 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7584 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7585 			    &do_csum) != 0) {
   7586 				/* Error message already displayed. */
   7587 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7588 				continue;
   7589 			}
   7590 		} else {
   7591 			do_csum = false;
   7592 			cmdlen = 0;
   7593 			fields = 0;
   7594 		}
   7595 
   7596 		/* Sync the DMA map. */
   7597 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7598 		    BUS_DMASYNC_PREWRITE);
   7599 
   7600 		/* Initialize the first transmit descriptor. */
   7601 		nexttx = txq->txq_next;
   7602 		if (!do_csum) {
    7603 			/* Set up a legacy descriptor; no context needed. */
   7604 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7605 			    dmamap->dm_segs[0].ds_addr);
   7606 			txq->txq_descs[nexttx].wtx_cmdlen =
   7607 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7608 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7609 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7610 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7611 			    NULL) {
   7612 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7613 				    htole32(WTX_CMD_VLE);
   7614 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7615 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7616 			} else {
    7617 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7618 			}
   7619 			dcmdlen = 0;
   7620 		} else {
    7621 			/* Set up an advanced data descriptor. */
   7622 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7623 			    htole64(dmamap->dm_segs[0].ds_addr);
   7624 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7625 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7626 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7627 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7628 			    htole32(fields);
   7629 			DPRINTF(WM_DEBUG_TX,
   7630 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7631 			    device_xname(sc->sc_dev), nexttx,
   7632 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7633 			DPRINTF(WM_DEBUG_TX,
   7634 			    ("\t 0x%08x%08x\n", fields,
   7635 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7636 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7637 		}
   7638 
   7639 		lasttx = nexttx;
   7640 		nexttx = WM_NEXTTX(txq, nexttx);
   7641 		/*
    7642 		 * Fill in the next descriptors.  The legacy and advanced
    7643 		 * formats are the same from here on.
   7644 		 */
   7645 		for (seg = 1; seg < dmamap->dm_nsegs;
   7646 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7647 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7648 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7649 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7650 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7651 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7652 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7653 			lasttx = nexttx;
   7654 
   7655 			DPRINTF(WM_DEBUG_TX,
   7656 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7657 			     "len %#04zx\n",
   7658 			    device_xname(sc->sc_dev), nexttx,
   7659 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7660 			    dmamap->dm_segs[seg].ds_len));
   7661 		}
   7662 
   7663 		KASSERT(lasttx != -1);
   7664 
   7665 		/*
   7666 		 * Set up the command byte on the last descriptor of
   7667 		 * the packet.  If we're in the interrupt delay window,
   7668 		 * delay the interrupt.
   7669 		 */
   7670 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7671 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
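         		/*
         		 * The EOP and RS bits occupy the same positions in the
         		 * legacy and advanced descriptor formats, so the legacy
         		 * field can be updated for either layout.
         		 */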
   7672 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7673 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7674 
   7675 		txs->txs_lastdesc = lasttx;
   7676 
   7677 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7678 		    device_xname(sc->sc_dev),
   7679 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7680 
   7681 		/* Sync the descriptors we're using. */
   7682 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7683 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7684 
   7685 		/* Give the packet to the chip. */
   7686 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7687 		sent = true;
   7688 
   7689 		DPRINTF(WM_DEBUG_TX,
   7690 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7691 
   7692 		DPRINTF(WM_DEBUG_TX,
   7693 		    ("%s: TX: finished transmitting packet, job %d\n",
   7694 		    device_xname(sc->sc_dev), txq->txq_snext));
   7695 
   7696 		/* Advance the tx pointer. */
   7697 		txq->txq_free -= txs->txs_ndesc;
   7698 		txq->txq_next = nexttx;
   7699 
   7700 		txq->txq_sfree--;
   7701 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7702 
   7703 		/* Pass the packet to any BPF listeners. */
   7704 		bpf_mtap(ifp, m0);
   7705 	}
   7706 
   7707 	if (m0 != NULL) {
   7708 		if (!is_transmit)
   7709 			ifp->if_flags |= IFF_OACTIVE;
   7710 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7711 		WM_Q_EVCNT_INCR(txq, txdrop);
   7712 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7713 			__func__));
   7714 		m_freem(m0);
   7715 	}
   7716 
   7717 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7718 		/* No more slots; notify upper layer. */
   7719 		if (!is_transmit)
   7720 			ifp->if_flags |= IFF_OACTIVE;
   7721 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7722 	}
   7723 
   7724 	if (sent) {
   7725 		/* Set a watchdog timer in case the chip flakes out. */
   7726 		ifp->if_timer = 5;
   7727 	}
   7728 }
   7729 
   7730 static void
   7731 wm_deferred_start_locked(struct wm_txqueue *txq)
   7732 {
   7733 	struct wm_softc *sc = txq->txq_sc;
   7734 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7735 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7736 	int qid = wmq->wmq_id;
   7737 
   7738 	KASSERT(mutex_owned(txq->txq_lock));
   7739 
   7740 	if (txq->txq_stopping) {
   7741 		mutex_exit(txq->txq_lock);
   7742 		return;
   7743 	}
   7744 
   7745 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7746 		/* XXX needed for ALTQ or single-CPU systems */
   7747 		if (qid == 0)
   7748 			wm_nq_start_locked(ifp);
   7749 		wm_nq_transmit_locked(ifp, txq);
   7750 	} else {
    7751 		/* XXX needed for ALTQ or single-CPU systems */
   7752 		if (qid == 0)
   7753 			wm_start_locked(ifp);
   7754 		wm_transmit_locked(ifp, txq);
   7755 	}
   7756 }
   7757 
   7758 /* Interrupt */
   7759 
   7760 /*
   7761  * wm_txeof:
   7762  *
   7763  *	Helper; handle transmit interrupts.
   7764  */
   7765 static int
   7766 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7767 {
   7768 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7769 	struct wm_txsoft *txs;
   7770 	bool processed = false;
   7771 	int count = 0;
   7772 	int i;
   7773 	uint8_t status;
   7774 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7775 
   7776 	KASSERT(mutex_owned(txq->txq_lock));
   7777 
   7778 	if (txq->txq_stopping)
   7779 		return 0;
   7780 
   7781 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7782 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers. */
   7783 	if (wmq->wmq_id == 0)
   7784 		ifp->if_flags &= ~IFF_OACTIVE;
   7785 
   7786 	/*
   7787 	 * Go through the Tx list and free mbufs for those
   7788 	 * frames which have been transmitted.
   7789 	 */
   7790 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7791 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7792 		txs = &txq->txq_soft[i];
   7793 
   7794 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7795 			device_xname(sc->sc_dev), i));
   7796 
   7797 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7798 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7799 
   7800 		status =
   7801 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
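         		/*
         		 * If the descriptor-done (DD) bit is not yet set, the
         		 * chip still owns this job: re-arm the sync and stop
         		 * scanning.
         		 */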
   7802 		if ((status & WTX_ST_DD) == 0) {
   7803 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7804 			    BUS_DMASYNC_PREREAD);
   7805 			break;
   7806 		}
   7807 
   7808 		processed = true;
   7809 		count++;
   7810 		DPRINTF(WM_DEBUG_TX,
   7811 		    ("%s: TX: job %d done: descs %d..%d\n",
   7812 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7813 		    txs->txs_lastdesc));
   7814 
   7815 		/*
   7816 		 * XXX We should probably be using the statistics
   7817 		 * XXX registers, but I don't know if they exist
   7818 		 * XXX on chips before the i82544.
   7819 		 */
   7820 
   7821 #ifdef WM_EVENT_COUNTERS
   7822 		if (status & WTX_ST_TU)
   7823 			WM_Q_EVCNT_INCR(txq, tu);
   7824 #endif /* WM_EVENT_COUNTERS */
   7825 
   7826 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7827 			ifp->if_oerrors++;
   7828 			if (status & WTX_ST_LC)
   7829 				log(LOG_WARNING, "%s: late collision\n",
   7830 				    device_xname(sc->sc_dev));
   7831 			else if (status & WTX_ST_EC) {
   7832 				ifp->if_collisions += 16;
   7833 				log(LOG_WARNING, "%s: excessive collisions\n",
   7834 				    device_xname(sc->sc_dev));
   7835 			}
   7836 		} else
   7837 			ifp->if_opackets++;
   7838 
   7839 		txq->txq_packets++;
   7840 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7841 
   7842 		txq->txq_free += txs->txs_ndesc;
   7843 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7844 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7845 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7846 		m_freem(txs->txs_mbuf);
   7847 		txs->txs_mbuf = NULL;
   7848 	}
   7849 
   7850 	/* Update the dirty transmit buffer pointer. */
   7851 	txq->txq_sdirty = i;
   7852 	DPRINTF(WM_DEBUG_TX,
   7853 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7854 
   7855 	if (count != 0)
   7856 		rnd_add_uint32(&sc->rnd_source, count);
   7857 
   7858 	/*
   7859 	 * If there are no more pending transmissions, cancel the watchdog
   7860 	 * timer.
   7861 	 */
   7862 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7863 		ifp->if_timer = 0;
   7864 
   7865 	return processed;
   7866 }
   7867 
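         /*
          * Accessors for the RX descriptor fields.  Three descriptor layouts
          * are in use: the legacy format, the extended format (82574 only)
          * and the advanced ("NEWQUEUE") format (82575 and newer).
          */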
   7868 static inline uint32_t
   7869 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7870 {
   7871 	struct wm_softc *sc = rxq->rxq_sc;
   7872 
   7873 	if (sc->sc_type == WM_T_82574)
   7874 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7875 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7876 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7877 	else
   7878 		return rxq->rxq_descs[idx].wrx_status;
   7879 }
   7880 
   7881 static inline uint32_t
   7882 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7883 {
   7884 	struct wm_softc *sc = rxq->rxq_sc;
   7885 
   7886 	if (sc->sc_type == WM_T_82574)
   7887 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7888 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7889 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7890 	else
   7891 		return rxq->rxq_descs[idx].wrx_errors;
   7892 }
   7893 
   7894 static inline uint16_t
   7895 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7896 {
   7897 	struct wm_softc *sc = rxq->rxq_sc;
   7898 
   7899 	if (sc->sc_type == WM_T_82574)
   7900 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7901 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7902 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7903 	else
   7904 		return rxq->rxq_descs[idx].wrx_special;
   7905 }
   7906 
   7907 static inline int
   7908 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7909 {
   7910 	struct wm_softc *sc = rxq->rxq_sc;
   7911 
   7912 	if (sc->sc_type == WM_T_82574)
   7913 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7914 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7915 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7916 	else
   7917 		return rxq->rxq_descs[idx].wrx_len;
   7918 }
   7919 
   7920 #ifdef WM_DEBUG
   7921 static inline uint32_t
   7922 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7923 {
   7924 	struct wm_softc *sc = rxq->rxq_sc;
   7925 
   7926 	if (sc->sc_type == WM_T_82574)
   7927 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7928 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7929 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7930 	else
   7931 		return 0;
   7932 }
   7933 
   7934 static inline uint8_t
   7935 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7936 {
   7937 	struct wm_softc *sc = rxq->rxq_sc;
   7938 
   7939 	if (sc->sc_type == WM_T_82574)
   7940 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7941 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7942 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7943 	else
   7944 		return 0;
   7945 }
   7946 #endif /* WM_DEBUG */
   7947 
   7948 static inline bool
   7949 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7950     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7951 {
   7952 
   7953 	if (sc->sc_type == WM_T_82574)
   7954 		return (status & ext_bit) != 0;
   7955 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7956 		return (status & nq_bit) != 0;
   7957 	else
   7958 		return (status & legacy_bit) != 0;
   7959 }
   7960 
   7961 static inline bool
   7962 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7963     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7964 {
   7965 
   7966 	if (sc->sc_type == WM_T_82574)
   7967 		return (error & ext_bit) != 0;
   7968 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7969 		return (error & nq_bit) != 0;
   7970 	else
   7971 		return (error & legacy_bit) != 0;
   7972 }
   7973 
   7974 static inline bool
   7975 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7976 {
   7977 
   7978 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7979 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7980 		return true;
   7981 	else
   7982 		return false;
   7983 }
   7984 
   7985 static inline bool
   7986 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7987 {
   7988 	struct wm_softc *sc = rxq->rxq_sc;
   7989 
   7990 	/* XXXX missing error bit for newqueue? */
   7991 	if (wm_rxdesc_is_set_error(sc, errors,
   7992 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7993 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7994 		NQRXC_ERROR_RXE)) {
   7995 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7996 			log(LOG_WARNING, "%s: symbol error\n",
   7997 			    device_xname(sc->sc_dev));
   7998 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7999 			log(LOG_WARNING, "%s: receive sequence error\n",
   8000 			    device_xname(sc->sc_dev));
   8001 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8002 			log(LOG_WARNING, "%s: CRC error\n",
   8003 			    device_xname(sc->sc_dev));
   8004 		return true;
   8005 	}
   8006 
   8007 	return false;
   8008 }
   8009 
   8010 static inline bool
   8011 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8012 {
   8013 	struct wm_softc *sc = rxq->rxq_sc;
   8014 
   8015 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8016 		NQRXC_STATUS_DD)) {
   8017 		/* We have processed all of the receive descriptors. */
   8018 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8019 		return false;
   8020 	}
   8021 
   8022 	return true;
   8023 }
   8024 
   8025 static inline bool
   8026 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8027     struct mbuf *m)
   8028 {
   8029 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8030 
   8031 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8032 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8033 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8034 	}
   8035 
   8036 	return true;
   8037 }
   8038 
   8039 static inline void
   8040 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8041     uint32_t errors, struct mbuf *m)
   8042 {
   8043 	struct wm_softc *sc = rxq->rxq_sc;
   8044 
   8045 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8046 		if (wm_rxdesc_is_set_status(sc, status,
   8047 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8048 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8049 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8050 			if (wm_rxdesc_is_set_error(sc, errors,
   8051 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8052 				m->m_pkthdr.csum_flags |=
   8053 					M_CSUM_IPv4_BAD;
   8054 		}
   8055 		if (wm_rxdesc_is_set_status(sc, status,
   8056 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8057 			/*
   8058 			 * Note: we don't know if this was TCP or UDP,
   8059 			 * so we just set both bits, and expect the
   8060 			 * upper layers to deal.
   8061 			 */
   8062 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8063 			m->m_pkthdr.csum_flags |=
   8064 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8065 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8066 			if (wm_rxdesc_is_set_error(sc, errors,
   8067 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8068 				m->m_pkthdr.csum_flags |=
   8069 					M_CSUM_TCP_UDP_BAD;
   8070 		}
   8071 	}
   8072 }
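
         /*
          * The csum_flags set above follow the usual NetBSD M_CSUM
          * convention: a "good" bit (e.g. M_CSUM_IPv4) means the hardware
          * checked the checksum, and the matching _BAD bit marks a verified
          * failure.  A minimal, illustrative sketch of how an upper layer
          * consumer might test them:
          *
          *	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
          *		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4_BAD)
          *			;	(drop or count an input error)
          *		else
          *			;	(skip the software IP checksum)
          *	}
          */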
   8073 
   8074 /*
   8075  * wm_rxeof:
   8076  *
   8077  *	Helper; handle receive interrupts.
   8078  */
   8079 static void
   8080 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8081 {
   8082 	struct wm_softc *sc = rxq->rxq_sc;
   8083 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8084 	struct wm_rxsoft *rxs;
   8085 	struct mbuf *m;
   8086 	int i, len;
   8087 	int count = 0;
   8088 	uint32_t status, errors;
   8089 	uint16_t vlantag;
   8090 
   8091 	KASSERT(mutex_owned(rxq->rxq_lock));
   8092 
   8093 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8094 		if (limit-- == 0) {
   8095 			rxq->rxq_ptr = i;
   8096 			break;
   8097 		}
   8098 
   8099 		rxs = &rxq->rxq_soft[i];
   8100 
   8101 		DPRINTF(WM_DEBUG_RX,
   8102 		    ("%s: RX: checking descriptor %d\n",
   8103 		    device_xname(sc->sc_dev), i));
   8104 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8105 
   8106 		status = wm_rxdesc_get_status(rxq, i);
   8107 		errors = wm_rxdesc_get_errors(rxq, i);
   8108 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8109 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8110 #ifdef WM_DEBUG
   8111 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8112 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8113 #endif
   8114 
   8115 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8116 			/*
    8117 			 * Update the receive pointer while still holding
    8118 			 * rxq_lock, so it stays consistent with the counters.
    8119 			 */
   8120 			rxq->rxq_ptr = i;
   8121 			break;
   8122 		}
   8123 
   8124 		count++;
   8125 		if (__predict_false(rxq->rxq_discard)) {
   8126 			DPRINTF(WM_DEBUG_RX,
   8127 			    ("%s: RX: discarding contents of descriptor %d\n",
   8128 			    device_xname(sc->sc_dev), i));
   8129 			wm_init_rxdesc(rxq, i);
   8130 			if (wm_rxdesc_is_eop(rxq, status)) {
   8131 				/* Reset our state. */
   8132 				DPRINTF(WM_DEBUG_RX,
   8133 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8134 				    device_xname(sc->sc_dev)));
   8135 				rxq->rxq_discard = 0;
   8136 			}
   8137 			continue;
   8138 		}
   8139 
   8140 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8141 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8142 
   8143 		m = rxs->rxs_mbuf;
   8144 
   8145 		/*
   8146 		 * Add a new receive buffer to the ring, unless of
   8147 		 * course the length is zero. Treat the latter as a
   8148 		 * failed mapping.
   8149 		 */
   8150 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8151 			/*
   8152 			 * Failed, throw away what we've done so
   8153 			 * far, and discard the rest of the packet.
   8154 			 */
   8155 			ifp->if_ierrors++;
   8156 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8157 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8158 			wm_init_rxdesc(rxq, i);
   8159 			if (!wm_rxdesc_is_eop(rxq, status))
   8160 				rxq->rxq_discard = 1;
   8161 			if (rxq->rxq_head != NULL)
   8162 				m_freem(rxq->rxq_head);
   8163 			WM_RXCHAIN_RESET(rxq);
   8164 			DPRINTF(WM_DEBUG_RX,
   8165 			    ("%s: RX: Rx buffer allocation failed, "
   8166 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8167 			    rxq->rxq_discard ? " (discard)" : ""));
   8168 			continue;
   8169 		}
   8170 
   8171 		m->m_len = len;
   8172 		rxq->rxq_len += len;
   8173 		DPRINTF(WM_DEBUG_RX,
   8174 		    ("%s: RX: buffer at %p len %d\n",
   8175 		    device_xname(sc->sc_dev), m->m_data, len));
   8176 
   8177 		/* If this is not the end of the packet, keep looking. */
   8178 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8179 			WM_RXCHAIN_LINK(rxq, m);
   8180 			DPRINTF(WM_DEBUG_RX,
   8181 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8182 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8183 			continue;
   8184 		}
   8185 
    8186 		/*
    8187 		 * Okay, we have the entire packet now.  The chip includes
    8188 		 * the FCS except on the I350, I354 and I21[01] (not all
    8189 		 * chips can be configured to strip it), so we need to
    8190 		 * trim it.  We may also need to adjust the length of the
    8191 		 * previous mbuf in the chain if the current mbuf is too
    8192 		 * short.  Due to an erratum, the RCTL_SECRC bit in the
    8193 		 * RCTL register is always set on the I350, so the FCS is
    8194 		 * already stripped there and we don't trim it.
    8195 		 */
   8196 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8197 		    && (sc->sc_type != WM_T_I210)
   8198 		    && (sc->sc_type != WM_T_I211)) {
   8199 			if (m->m_len < ETHER_CRC_LEN) {
   8200 				rxq->rxq_tail->m_len
   8201 				    -= (ETHER_CRC_LEN - m->m_len);
   8202 				m->m_len = 0;
   8203 			} else
   8204 				m->m_len -= ETHER_CRC_LEN;
   8205 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8206 		} else
   8207 			len = rxq->rxq_len;
   8208 
   8209 		WM_RXCHAIN_LINK(rxq, m);
   8210 
   8211 		*rxq->rxq_tailp = NULL;
   8212 		m = rxq->rxq_head;
   8213 
   8214 		WM_RXCHAIN_RESET(rxq);
   8215 
   8216 		DPRINTF(WM_DEBUG_RX,
   8217 		    ("%s: RX: have entire packet, len -> %d\n",
   8218 		    device_xname(sc->sc_dev), len));
   8219 
   8220 		/* If an error occurred, update stats and drop the packet. */
   8221 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8222 			m_freem(m);
   8223 			continue;
   8224 		}
   8225 
   8226 		/* No errors.  Receive the packet. */
   8227 		m_set_rcvif(m, ifp);
   8228 		m->m_pkthdr.len = len;
    8229 		/*
    8230 		 * TODO
    8231 		 * The rsshash and rsstype should be saved in this mbuf.
    8232 		 */
   8233 		DPRINTF(WM_DEBUG_RX,
   8234 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8235 			device_xname(sc->sc_dev), rsstype, rsshash));
   8236 
   8237 		/*
   8238 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8239 		 * for us.  Associate the tag with the packet.
   8240 		 */
   8241 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8242 			continue;
   8243 
   8244 		/* Set up checksum info for this packet. */
   8245 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8246 		/*
    8247 		 * Update the receive pointer while still holding rxq_lock,
    8248 		 * so it stays consistent with the counters.
    8249 		 */
   8250 		rxq->rxq_ptr = i;
   8251 		rxq->rxq_packets++;
   8252 		rxq->rxq_bytes += len;
   8253 		mutex_exit(rxq->rxq_lock);
   8254 
   8255 		/* Pass it on. */
   8256 		if_percpuq_enqueue(sc->sc_ipq, m);
   8257 
   8258 		mutex_enter(rxq->rxq_lock);
   8259 
   8260 		if (rxq->rxq_stopping)
   8261 			break;
   8262 	}
   8263 
   8264 	if (count != 0)
   8265 		rnd_add_uint32(&sc->rnd_source, count);
   8266 
   8267 	DPRINTF(WM_DEBUG_RX,
   8268 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8269 }
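
         /*
          * State carried across wm_rxeof() calls: rxq_ptr is the next
          * descriptor to examine; rxq_head/rxq_tailp chain the mbufs of a
          * packet spanning several descriptors while rxq_len accumulates its
          * length; rxq_discard is set when a buffer shortage forces the rest
          * of a multi-descriptor packet to be dropped until its EOP
          * descriptor is seen.
          */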
   8270 
   8271 /*
   8272  * wm_linkintr_gmii:
   8273  *
   8274  *	Helper; handle link interrupts for GMII.
   8275  */
   8276 static void
   8277 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8278 {
   8279 
   8280 	KASSERT(WM_CORE_LOCKED(sc));
   8281 
   8282 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8283 		__func__));
   8284 
   8285 	if (icr & ICR_LSC) {
   8286 		uint32_t reg;
   8287 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8288 
   8289 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8290 			wm_gig_downshift_workaround_ich8lan(sc);
   8291 
   8292 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8293 			device_xname(sc->sc_dev)));
   8294 		mii_pollstat(&sc->sc_mii);
   8295 		if (sc->sc_type == WM_T_82543) {
   8296 			int miistatus, active;
   8297 
   8298 			/*
   8299 			 * With 82543, we need to force speed and
   8300 			 * duplex on the MAC equal to what the PHY
   8301 			 * speed and duplex configuration is.
   8302 			 */
   8303 			miistatus = sc->sc_mii.mii_media_status;
   8304 
   8305 			if (miistatus & IFM_ACTIVE) {
   8306 				active = sc->sc_mii.mii_media_active;
   8307 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8308 				switch (IFM_SUBTYPE(active)) {
   8309 				case IFM_10_T:
   8310 					sc->sc_ctrl |= CTRL_SPEED_10;
   8311 					break;
   8312 				case IFM_100_TX:
   8313 					sc->sc_ctrl |= CTRL_SPEED_100;
   8314 					break;
   8315 				case IFM_1000_T:
   8316 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8317 					break;
   8318 				default:
    8319 					/*
    8320 					 * Fiber?
    8321 					 * Should not enter here.
    8322 					 */
   8323 					printf("unknown media (%x)\n", active);
   8324 					break;
   8325 				}
   8326 				if (active & IFM_FDX)
   8327 					sc->sc_ctrl |= CTRL_FD;
   8328 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8329 			}
   8330 		} else if ((sc->sc_type == WM_T_ICH8)
   8331 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8332 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8333 		} else if (sc->sc_type == WM_T_PCH) {
   8334 			wm_k1_gig_workaround_hv(sc,
   8335 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8336 		}
   8337 
   8338 		if ((sc->sc_phytype == WMPHY_82578)
   8339 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8340 			== IFM_1000_T)) {
   8341 
   8342 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8343 				delay(200*1000); /* XXX too big */
   8344 
   8345 				/* Link stall fix for link up */
   8346 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8347 				    HV_MUX_DATA_CTRL,
   8348 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8349 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8350 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8351 				    HV_MUX_DATA_CTRL,
   8352 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8353 			}
   8354 		}
   8355 		/*
   8356 		 * I217 Packet Loss issue:
   8357 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8358 		 * on power up.
   8359 		 * Set the Beacon Duration for I217 to 8 usec
   8360 		 */
   8361 		if ((sc->sc_type == WM_T_PCH_LPT)
   8362 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8363 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8364 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8365 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8366 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8367 		}
   8368 
   8369 		/* XXX Work-around I218 hang issue */
   8370 		/* e1000_k1_workaround_lpt_lp() */
   8371 
   8372 		if ((sc->sc_type == WM_T_PCH_LPT)
   8373 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8374 			/*
   8375 			 * Set platform power management values for Latency
   8376 			 * Tolerance Reporting (LTR)
   8377 			 */
   8378 			wm_platform_pm_pch_lpt(sc,
   8379 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8380 				    != 0));
   8381 		}
   8382 
   8383 		/* FEXTNVM6 K1-off workaround */
   8384 		if (sc->sc_type == WM_T_PCH_SPT) {
   8385 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8386 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8387 			    & FEXTNVM6_K1_OFF_ENABLE)
   8388 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8389 			else
   8390 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8391 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8392 		}
   8393 	} else if (icr & ICR_RXSEQ) {
   8394 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8395 			device_xname(sc->sc_dev)));
   8396 	}
   8397 }
   8398 
   8399 /*
   8400  * wm_linkintr_tbi:
   8401  *
   8402  *	Helper; handle link interrupts for TBI mode.
   8403  */
   8404 static void
   8405 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8406 {
   8407 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8408 	uint32_t status;
   8409 
   8410 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8411 		__func__));
   8412 
   8413 	status = CSR_READ(sc, WMREG_STATUS);
   8414 	if (icr & ICR_LSC) {
   8415 		if (status & STATUS_LU) {
   8416 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8417 			    device_xname(sc->sc_dev),
   8418 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8419 			/*
   8420 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8421 			 * so we should update sc->sc_ctrl
   8422 			 */
   8423 
   8424 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8425 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8426 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8427 			if (status & STATUS_FD)
   8428 				sc->sc_tctl |=
   8429 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8430 			else
   8431 				sc->sc_tctl |=
   8432 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8433 			if (sc->sc_ctrl & CTRL_TFCE)
   8434 				sc->sc_fcrtl |= FCRTL_XONE;
   8435 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8436 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8437 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8438 				      sc->sc_fcrtl);
   8439 			sc->sc_tbi_linkup = 1;
   8440 			if_link_state_change(ifp, LINK_STATE_UP);
   8441 		} else {
   8442 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8443 			    device_xname(sc->sc_dev)));
   8444 			sc->sc_tbi_linkup = 0;
   8445 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8446 		}
   8447 		/* Update LED */
   8448 		wm_tbi_serdes_set_linkled(sc);
   8449 	} else if (icr & ICR_RXSEQ) {
   8450 		DPRINTF(WM_DEBUG_LINK,
   8451 		    ("%s: LINK: Receive sequence error\n",
   8452 		    device_xname(sc->sc_dev)));
   8453 	}
   8454 }
   8455 
   8456 /*
   8457  * wm_linkintr_serdes:
   8458  *
    8459  *	Helper; handle link interrupts for SERDES mode.
   8460  */
   8461 static void
   8462 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8463 {
   8464 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8465 	struct mii_data *mii = &sc->sc_mii;
   8466 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8467 	uint32_t pcs_adv, pcs_lpab, reg;
   8468 
   8469 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8470 		__func__));
   8471 
   8472 	if (icr & ICR_LSC) {
   8473 		/* Check PCS */
   8474 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8475 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8476 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8477 				device_xname(sc->sc_dev)));
   8478 			mii->mii_media_status |= IFM_ACTIVE;
   8479 			sc->sc_tbi_linkup = 1;
   8480 			if_link_state_change(ifp, LINK_STATE_UP);
   8481 		} else {
   8482 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8483 				device_xname(sc->sc_dev)));
   8484 			mii->mii_media_status |= IFM_NONE;
   8485 			sc->sc_tbi_linkup = 0;
   8486 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8487 			wm_tbi_serdes_set_linkled(sc);
   8488 			return;
   8489 		}
   8490 		mii->mii_media_active |= IFM_1000_SX;
   8491 		if ((reg & PCS_LSTS_FDX) != 0)
   8492 			mii->mii_media_active |= IFM_FDX;
   8493 		else
   8494 			mii->mii_media_active |= IFM_HDX;
   8495 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8496 			/* Check flow */
   8497 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8498 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8499 				DPRINTF(WM_DEBUG_LINK,
   8500 				    ("XXX LINKOK but not ACOMP\n"));
   8501 				return;
   8502 			}
   8503 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8504 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8505 			DPRINTF(WM_DEBUG_LINK,
   8506 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8507 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8508 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8509 				mii->mii_media_active |= IFM_FLOW
   8510 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8511 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8512 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8513 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8514 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8515 				mii->mii_media_active |= IFM_FLOW
   8516 				    | IFM_ETH_TXPAUSE;
   8517 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8518 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8519 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8520 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8521 				mii->mii_media_active |= IFM_FLOW
   8522 				    | IFM_ETH_RXPAUSE;
   8523 		}
   8524 		/* Update LED */
   8525 		wm_tbi_serdes_set_linkled(sc);
   8526 	} else {
   8527 		DPRINTF(WM_DEBUG_LINK,
   8528 		    ("%s: LINK: Receive sequence error\n",
   8529 		    device_xname(sc->sc_dev)));
   8530 	}
   8531 }
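
         /*
          * For reference, the pause resolution in wm_linkintr_serdes() above
          * follows the usual IEEE 802.3 priority table (local advertisement
          * vs. link partner ability):
          *
          *	local SYM  local ASYM  LP SYM  LP ASYM  result
          *	    1          x         1        x      TX and RX pause
          *	    0          1         1        1      TX pause only
          *	    1          1         0        1      RX pause only
          */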
   8532 
   8533 /*
   8534  * wm_linkintr:
   8535  *
   8536  *	Helper; handle link interrupts.
   8537  */
   8538 static void
   8539 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8540 {
   8541 
   8542 	KASSERT(WM_CORE_LOCKED(sc));
   8543 
   8544 	if (sc->sc_flags & WM_F_HAS_MII)
   8545 		wm_linkintr_gmii(sc, icr);
   8546 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8547 	    && (sc->sc_type >= WM_T_82575))
   8548 		wm_linkintr_serdes(sc, icr);
   8549 	else
   8550 		wm_linkintr_tbi(sc, icr);
   8551 }
   8552 
   8553 /*
   8554  * wm_intr_legacy:
   8555  *
   8556  *	Interrupt service routine for INTx and MSI.
   8557  */
   8558 static int
   8559 wm_intr_legacy(void *arg)
   8560 {
   8561 	struct wm_softc *sc = arg;
   8562 	struct wm_queue *wmq = &sc->sc_queue[0];
   8563 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8564 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8565 	uint32_t icr, rndval = 0;
   8566 	int handled = 0;
   8567 
   8568 	while (1 /* CONSTCOND */) {
   8569 		icr = CSR_READ(sc, WMREG_ICR);
   8570 		if ((icr & sc->sc_icr) == 0)
   8571 			break;
   8572 		if (handled == 0) {
   8573 			DPRINTF(WM_DEBUG_TX,
   8574 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8575 		}
   8576 		if (rndval == 0)
   8577 			rndval = icr;
   8578 
   8579 		mutex_enter(rxq->rxq_lock);
   8580 
   8581 		if (rxq->rxq_stopping) {
   8582 			mutex_exit(rxq->rxq_lock);
   8583 			break;
   8584 		}
   8585 
   8586 		handled = 1;
   8587 
   8588 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8589 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8590 			DPRINTF(WM_DEBUG_RX,
   8591 			    ("%s: RX: got Rx intr 0x%08x\n",
   8592 			    device_xname(sc->sc_dev),
   8593 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8594 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8595 		}
   8596 #endif
   8597 		wm_rxeof(rxq, UINT_MAX);
   8598 
   8599 		mutex_exit(rxq->rxq_lock);
   8600 		mutex_enter(txq->txq_lock);
   8601 
   8602 		if (txq->txq_stopping) {
   8603 			mutex_exit(txq->txq_lock);
   8604 			break;
   8605 		}
   8606 
   8607 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8608 		if (icr & ICR_TXDW) {
   8609 			DPRINTF(WM_DEBUG_TX,
   8610 			    ("%s: TX: got TXDW interrupt\n",
   8611 			    device_xname(sc->sc_dev)));
   8612 			WM_Q_EVCNT_INCR(txq, txdw);
   8613 		}
   8614 #endif
   8615 		wm_txeof(sc, txq);
   8616 
   8617 		mutex_exit(txq->txq_lock);
   8618 		WM_CORE_LOCK(sc);
   8619 
   8620 		if (sc->sc_core_stopping) {
   8621 			WM_CORE_UNLOCK(sc);
   8622 			break;
   8623 		}
   8624 
   8625 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8626 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8627 			wm_linkintr(sc, icr);
   8628 		}
   8629 
   8630 		WM_CORE_UNLOCK(sc);
   8631 
   8632 		if (icr & ICR_RXO) {
   8633 #if defined(WM_DEBUG)
   8634 			log(LOG_WARNING, "%s: Receive overrun\n",
   8635 			    device_xname(sc->sc_dev));
   8636 #endif /* defined(WM_DEBUG) */
   8637 		}
   8638 	}
   8639 
   8640 	rnd_add_uint32(&sc->rnd_source, rndval);
   8641 
   8642 	if (handled) {
   8643 		/* Try to get more packets going. */
   8644 		softint_schedule(wmq->wmq_si);
   8645 	}
   8646 
   8647 	return handled;
   8648 }
   8649 
   8650 static inline void
   8651 wm_txrxintr_disable(struct wm_queue *wmq)
   8652 {
   8653 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8654 
   8655 	if (sc->sc_type == WM_T_82574)
   8656 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8657 	else if (sc->sc_type == WM_T_82575)
   8658 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8659 	else
   8660 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8661 }
   8662 
   8663 static inline void
   8664 wm_txrxintr_enable(struct wm_queue *wmq)
   8665 {
   8666 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8667 
   8668 	wm_itrs_calculate(sc, wmq);
   8669 
   8670 	if (sc->sc_type == WM_T_82574)
   8671 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8672 	else if (sc->sc_type == WM_T_82575)
   8673 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8674 	else
   8675 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8676 }
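
         /*
          * The two helpers above capture the three interrupt mask schemes in
          * use: the 82574 masks queue interrupts via IMS/IMC with the
          * ICR_TXQ/ICR_RXQ bits, the 82575 via EIMS/EIMC with the per-queue
          * EITR bits, and newer MSI-X capable chips via EIMS/EIMC with one
          * bit per interrupt vector (wmq_intr_idx).
          */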
   8677 
   8678 static int
   8679 wm_txrxintr_msix(void *arg)
   8680 {
   8681 	struct wm_queue *wmq = arg;
   8682 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8683 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8684 	struct wm_softc *sc = txq->txq_sc;
   8685 	u_int limit = sc->sc_rx_intr_process_limit;
   8686 
   8687 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8688 
   8689 	DPRINTF(WM_DEBUG_TX,
   8690 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8691 
   8692 	wm_txrxintr_disable(wmq);
   8693 
   8694 	mutex_enter(txq->txq_lock);
   8695 
   8696 	if (txq->txq_stopping) {
   8697 		mutex_exit(txq->txq_lock);
   8698 		return 0;
   8699 	}
   8700 
   8701 	WM_Q_EVCNT_INCR(txq, txdw);
   8702 	wm_txeof(sc, txq);
   8703 	/* wm_deferred start() is done in wm_handle_queue(). */
   8704 	mutex_exit(txq->txq_lock);
   8705 
   8706 	DPRINTF(WM_DEBUG_RX,
   8707 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8708 	mutex_enter(rxq->rxq_lock);
   8709 
   8710 	if (rxq->rxq_stopping) {
   8711 		mutex_exit(rxq->rxq_lock);
   8712 		return 0;
   8713 	}
   8714 
   8715 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8716 	wm_rxeof(rxq, limit);
   8717 	mutex_exit(rxq->rxq_lock);
   8718 
   8719 	wm_itrs_writereg(sc, wmq);
   8720 
   8721 	softint_schedule(wmq->wmq_si);
   8722 
   8723 	return 1;
   8724 }
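
         /*
          * The MSI-X Tx/Rx handler above keeps hard interrupt time short: it
          * masks the queue's interrupt, reaps Tx completions, runs wm_rxeof()
          * with a bounded limit and updates the ITR, deferring the rest
          * (deferred start and another bounded Rx pass) to wm_handle_queue()
          * below, which re-enables the queue interrupt when done.
          */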
   8725 
   8726 static void
   8727 wm_handle_queue(void *arg)
   8728 {
   8729 	struct wm_queue *wmq = arg;
   8730 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8731 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8732 	struct wm_softc *sc = txq->txq_sc;
   8733 	u_int limit = sc->sc_rx_process_limit;
   8734 
   8735 	mutex_enter(txq->txq_lock);
   8736 	if (txq->txq_stopping) {
   8737 		mutex_exit(txq->txq_lock);
   8738 		return;
   8739 	}
   8740 	wm_txeof(sc, txq);
   8741 	wm_deferred_start_locked(txq);
   8742 	mutex_exit(txq->txq_lock);
   8743 
   8744 	mutex_enter(rxq->rxq_lock);
   8745 	if (rxq->rxq_stopping) {
   8746 		mutex_exit(rxq->rxq_lock);
   8747 		return;
   8748 	}
   8749 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8750 	wm_rxeof(rxq, limit);
   8751 	mutex_exit(rxq->rxq_lock);
   8752 
   8753 	wm_txrxintr_enable(wmq);
   8754 }
   8755 
   8756 /*
   8757  * wm_linkintr_msix:
   8758  *
   8759  *	Interrupt service routine for link status change for MSI-X.
   8760  */
   8761 static int
   8762 wm_linkintr_msix(void *arg)
   8763 {
   8764 	struct wm_softc *sc = arg;
   8765 	uint32_t reg;
   8766 
   8767 	DPRINTF(WM_DEBUG_LINK,
   8768 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8769 
   8770 	reg = CSR_READ(sc, WMREG_ICR);
   8771 	WM_CORE_LOCK(sc);
   8772 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8773 		goto out;
   8774 
   8775 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8776 	wm_linkintr(sc, ICR_LSC);
   8777 
   8778 out:
   8779 	WM_CORE_UNLOCK(sc);
   8780 
   8781 	if (sc->sc_type == WM_T_82574)
   8782 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8783 	else if (sc->sc_type == WM_T_82575)
   8784 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8785 	else
   8786 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8787 
   8788 	return 1;
   8789 }
   8790 
   8791 /*
   8792  * Media related.
   8793  * GMII, SGMII, TBI (and SERDES)
   8794  */
   8795 
   8796 /* Common */
   8797 
   8798 /*
   8799  * wm_tbi_serdes_set_linkled:
   8800  *
   8801  *	Update the link LED on TBI and SERDES devices.
   8802  */
   8803 static void
   8804 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8805 {
   8806 
   8807 	if (sc->sc_tbi_linkup)
   8808 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8809 	else
   8810 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8811 
   8812 	/* 82540 or newer devices are active low */
   8813 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8814 
   8815 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8816 }
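
         /*
          * Worked example of the XOR above: on 82540 and newer the LED pin
          * is active low, so with link up (CTRL_SWDPIN(0) set just above)
          * the XOR clears the bit again and the pin is driven low, lighting
          * the LED; with link down the XOR sets the bit and the LED goes
          * out.
          */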
   8817 
   8818 /* GMII related */
   8819 
   8820 /*
   8821  * wm_gmii_reset:
   8822  *
   8823  *	Reset the PHY.
   8824  */
   8825 static void
   8826 wm_gmii_reset(struct wm_softc *sc)
   8827 {
   8828 	uint32_t reg;
   8829 	int rv;
   8830 
   8831 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8832 		device_xname(sc->sc_dev), __func__));
   8833 
   8834 	rv = sc->phy.acquire(sc);
   8835 	if (rv != 0) {
   8836 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8837 		    __func__);
   8838 		return;
   8839 	}
   8840 
   8841 	switch (sc->sc_type) {
   8842 	case WM_T_82542_2_0:
   8843 	case WM_T_82542_2_1:
   8844 		/* null */
   8845 		break;
   8846 	case WM_T_82543:
   8847 		/*
   8848 		 * With 82543, we need to force speed and duplex on the MAC
   8849 		 * equal to what the PHY speed and duplex configuration is.
   8850 		 * In addition, we need to perform a hardware reset on the PHY
   8851 		 * to take it out of reset.
   8852 		 */
   8853 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8854 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8855 
   8856 		/* The PHY reset pin is active-low. */
   8857 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8858 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8859 		    CTRL_EXT_SWDPIN(4));
   8860 		reg |= CTRL_EXT_SWDPIO(4);
   8861 
   8862 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8863 		CSR_WRITE_FLUSH(sc);
   8864 		delay(10*1000);
   8865 
   8866 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8867 		CSR_WRITE_FLUSH(sc);
   8868 		delay(150);
   8869 #if 0
   8870 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8871 #endif
   8872 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8873 		break;
   8874 	case WM_T_82544:	/* reset 10000us */
   8875 	case WM_T_82540:
   8876 	case WM_T_82545:
   8877 	case WM_T_82545_3:
   8878 	case WM_T_82546:
   8879 	case WM_T_82546_3:
   8880 	case WM_T_82541:
   8881 	case WM_T_82541_2:
   8882 	case WM_T_82547:
   8883 	case WM_T_82547_2:
   8884 	case WM_T_82571:	/* reset 100us */
   8885 	case WM_T_82572:
   8886 	case WM_T_82573:
   8887 	case WM_T_82574:
   8888 	case WM_T_82575:
   8889 	case WM_T_82576:
   8890 	case WM_T_82580:
   8891 	case WM_T_I350:
   8892 	case WM_T_I354:
   8893 	case WM_T_I210:
   8894 	case WM_T_I211:
   8895 	case WM_T_82583:
   8896 	case WM_T_80003:
   8897 		/* generic reset */
   8898 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8899 		CSR_WRITE_FLUSH(sc);
   8900 		delay(20000);
   8901 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8902 		CSR_WRITE_FLUSH(sc);
   8903 		delay(20000);
   8904 
   8905 		if ((sc->sc_type == WM_T_82541)
   8906 		    || (sc->sc_type == WM_T_82541_2)
   8907 		    || (sc->sc_type == WM_T_82547)
   8908 		    || (sc->sc_type == WM_T_82547_2)) {
    8909 			/* Workarounds for IGP are done in igp_reset() */
    8910 			/* XXX add code to set LED after PHY reset */
   8911 		}
   8912 		break;
   8913 	case WM_T_ICH8:
   8914 	case WM_T_ICH9:
   8915 	case WM_T_ICH10:
   8916 	case WM_T_PCH:
   8917 	case WM_T_PCH2:
   8918 	case WM_T_PCH_LPT:
   8919 	case WM_T_PCH_SPT:
   8920 		/* generic reset */
   8921 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8922 		CSR_WRITE_FLUSH(sc);
   8923 		delay(100);
   8924 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8925 		CSR_WRITE_FLUSH(sc);
   8926 		delay(150);
   8927 		break;
   8928 	default:
   8929 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8930 		    __func__);
   8931 		break;
   8932 	}
   8933 
   8934 	sc->phy.release(sc);
   8935 
   8936 	/* get_cfg_done */
   8937 	wm_get_cfg_done(sc);
   8938 
   8939 	/* extra setup */
   8940 	switch (sc->sc_type) {
   8941 	case WM_T_82542_2_0:
   8942 	case WM_T_82542_2_1:
   8943 	case WM_T_82543:
   8944 	case WM_T_82544:
   8945 	case WM_T_82540:
   8946 	case WM_T_82545:
   8947 	case WM_T_82545_3:
   8948 	case WM_T_82546:
   8949 	case WM_T_82546_3:
   8950 	case WM_T_82541_2:
   8951 	case WM_T_82547_2:
   8952 	case WM_T_82571:
   8953 	case WM_T_82572:
   8954 	case WM_T_82573:
   8955 	case WM_T_82574:
   8956 	case WM_T_82583:
   8957 	case WM_T_82575:
   8958 	case WM_T_82576:
   8959 	case WM_T_82580:
   8960 	case WM_T_I350:
   8961 	case WM_T_I354:
   8962 	case WM_T_I210:
   8963 	case WM_T_I211:
   8964 	case WM_T_80003:
   8965 		/* null */
   8966 		break;
   8967 	case WM_T_82541:
   8968 	case WM_T_82547:
    8969 		/* XXX Actively configure the LED after PHY reset */
   8970 		break;
   8971 	case WM_T_ICH8:
   8972 	case WM_T_ICH9:
   8973 	case WM_T_ICH10:
   8974 	case WM_T_PCH:
   8975 	case WM_T_PCH2:
   8976 	case WM_T_PCH_LPT:
   8977 	case WM_T_PCH_SPT:
   8978 		wm_phy_post_reset(sc);
   8979 		break;
   8980 	default:
   8981 		panic("%s: unknown type\n", __func__);
   8982 		break;
   8983 	}
   8984 }
   8985 
   8986 /*
   8987  * Setup sc_phytype and mii_{read|write}reg.
   8988  *
    8989  *  To identify the PHY type, the correct read/write functions must be
    8990  * selected. To select them, the PCI ID or MAC type must be used, since
    8991  * the PHY registers cannot be accessed yet.
    8992  *
    8993  *  On the first call of this function, the PHY ID is not known yet.
    8994  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    8995  * so the result might be incorrect.
    8996  *
    8997  *  On the second call, the PHY OUI and model are used to identify the
    8998  * PHY type. It might still not be perfect due to missing entries in the
    8999  * comparison, but it is better than the first call.
    9000  *
    9001  *  If the newly detected result differs from the previous assumption,
    9002  * a diagnostic message is printed.
   9003  */
   9004 static void
   9005 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9006     uint16_t phy_model)
   9007 {
   9008 	device_t dev = sc->sc_dev;
   9009 	struct mii_data *mii = &sc->sc_mii;
   9010 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9011 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9012 	mii_readreg_t new_readreg;
   9013 	mii_writereg_t new_writereg;
   9014 
   9015 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9016 		device_xname(sc->sc_dev), __func__));
   9017 
   9018 	if (mii->mii_readreg == NULL) {
   9019 		/*
   9020 		 *  This is the first call of this function. For ICH and PCH
   9021 		 * variants, it's difficult to determine the PHY access method
   9022 		 * by sc_type, so use the PCI product ID for some devices.
   9023 		 */
   9024 
   9025 		switch (sc->sc_pcidevid) {
   9026 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9027 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9028 			/* 82577 */
   9029 			new_phytype = WMPHY_82577;
   9030 			break;
   9031 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9032 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9033 			/* 82578 */
   9034 			new_phytype = WMPHY_82578;
   9035 			break;
   9036 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9037 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9038 			/* 82579 */
   9039 			new_phytype = WMPHY_82579;
   9040 			break;
   9041 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9042 		case PCI_PRODUCT_INTEL_82801I_BM:
   9043 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9044 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9045 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9046 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9047 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9048 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9049 			/* ICH8, 9, 10 with 82567 */
   9050 			new_phytype = WMPHY_BM;
   9051 			break;
   9052 		default:
   9053 			break;
   9054 		}
   9055 	} else {
   9056 		/* It's not the first call. Use PHY OUI and model */
   9057 		switch (phy_oui) {
   9058 		case MII_OUI_ATHEROS: /* XXX ??? */
   9059 			switch (phy_model) {
   9060 			case 0x0004: /* XXX */
   9061 				new_phytype = WMPHY_82578;
   9062 				break;
   9063 			default:
   9064 				break;
   9065 			}
   9066 			break;
   9067 		case MII_OUI_xxMARVELL:
   9068 			switch (phy_model) {
   9069 			case MII_MODEL_xxMARVELL_I210:
   9070 				new_phytype = WMPHY_I210;
   9071 				break;
   9072 			case MII_MODEL_xxMARVELL_E1011:
   9073 			case MII_MODEL_xxMARVELL_E1000_3:
   9074 			case MII_MODEL_xxMARVELL_E1000_5:
   9075 			case MII_MODEL_xxMARVELL_E1112:
   9076 				new_phytype = WMPHY_M88;
   9077 				break;
   9078 			case MII_MODEL_xxMARVELL_E1149:
   9079 				new_phytype = WMPHY_BM;
   9080 				break;
   9081 			case MII_MODEL_xxMARVELL_E1111:
   9082 			case MII_MODEL_xxMARVELL_I347:
   9083 			case MII_MODEL_xxMARVELL_E1512:
   9084 			case MII_MODEL_xxMARVELL_E1340M:
   9085 			case MII_MODEL_xxMARVELL_E1543:
   9086 				new_phytype = WMPHY_M88;
   9087 				break;
   9088 			case MII_MODEL_xxMARVELL_I82563:
   9089 				new_phytype = WMPHY_GG82563;
   9090 				break;
   9091 			default:
   9092 				break;
   9093 			}
   9094 			break;
   9095 		case MII_OUI_INTEL:
   9096 			switch (phy_model) {
   9097 			case MII_MODEL_INTEL_I82577:
   9098 				new_phytype = WMPHY_82577;
   9099 				break;
   9100 			case MII_MODEL_INTEL_I82579:
   9101 				new_phytype = WMPHY_82579;
   9102 				break;
   9103 			case MII_MODEL_INTEL_I217:
   9104 				new_phytype = WMPHY_I217;
   9105 				break;
   9106 			case MII_MODEL_INTEL_I82580:
   9107 			case MII_MODEL_INTEL_I350:
   9108 				new_phytype = WMPHY_82580;
   9109 				break;
   9110 			default:
   9111 				break;
   9112 			}
   9113 			break;
   9114 		case MII_OUI_yyINTEL:
   9115 			switch (phy_model) {
   9116 			case MII_MODEL_yyINTEL_I82562G:
   9117 			case MII_MODEL_yyINTEL_I82562EM:
   9118 			case MII_MODEL_yyINTEL_I82562ET:
   9119 				new_phytype = WMPHY_IFE;
   9120 				break;
   9121 			case MII_MODEL_yyINTEL_IGP01E1000:
   9122 				new_phytype = WMPHY_IGP;
   9123 				break;
   9124 			case MII_MODEL_yyINTEL_I82566:
   9125 				new_phytype = WMPHY_IGP_3;
   9126 				break;
   9127 			default:
   9128 				break;
   9129 			}
   9130 			break;
   9131 		default:
   9132 			break;
   9133 		}
   9134 		if (new_phytype == WMPHY_UNKNOWN)
   9135 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9136 			    __func__);
   9137 
   9138 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9139 		    && (sc->sc_phytype != new_phytype)) {
    9140 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9141 			    " was incorrect. PHY type from PHY ID = %u\n",
   9142 			    sc->sc_phytype, new_phytype);
   9143 		}
   9144 	}
   9145 
   9146 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9147 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9148 		/* SGMII */
   9149 		new_readreg = wm_sgmii_readreg;
   9150 		new_writereg = wm_sgmii_writereg;
   9151 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9152 		/* BM2 (phyaddr == 1) */
   9153 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9154 		    && (new_phytype != WMPHY_BM)
   9155 		    && (new_phytype != WMPHY_UNKNOWN))
   9156 			doubt_phytype = new_phytype;
   9157 		new_phytype = WMPHY_BM;
   9158 		new_readreg = wm_gmii_bm_readreg;
   9159 		new_writereg = wm_gmii_bm_writereg;
   9160 	} else if (sc->sc_type >= WM_T_PCH) {
   9161 		/* All PCH* use _hv_ */
   9162 		new_readreg = wm_gmii_hv_readreg;
   9163 		new_writereg = wm_gmii_hv_writereg;
   9164 	} else if (sc->sc_type >= WM_T_ICH8) {
   9165 		/* non-82567 ICH8, 9 and 10 */
   9166 		new_readreg = wm_gmii_i82544_readreg;
   9167 		new_writereg = wm_gmii_i82544_writereg;
   9168 	} else if (sc->sc_type >= WM_T_80003) {
   9169 		/* 80003 */
   9170 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9171 		    && (new_phytype != WMPHY_GG82563)
   9172 		    && (new_phytype != WMPHY_UNKNOWN))
   9173 			doubt_phytype = new_phytype;
   9174 		new_phytype = WMPHY_GG82563;
   9175 		new_readreg = wm_gmii_i80003_readreg;
   9176 		new_writereg = wm_gmii_i80003_writereg;
   9177 	} else if (sc->sc_type >= WM_T_I210) {
   9178 		/* I210 and I211 */
   9179 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9180 		    && (new_phytype != WMPHY_I210)
   9181 		    && (new_phytype != WMPHY_UNKNOWN))
   9182 			doubt_phytype = new_phytype;
   9183 		new_phytype = WMPHY_I210;
   9184 		new_readreg = wm_gmii_gs40g_readreg;
   9185 		new_writereg = wm_gmii_gs40g_writereg;
   9186 	} else if (sc->sc_type >= WM_T_82580) {
   9187 		/* 82580, I350 and I354 */
   9188 		new_readreg = wm_gmii_82580_readreg;
   9189 		new_writereg = wm_gmii_82580_writereg;
   9190 	} else if (sc->sc_type >= WM_T_82544) {
    9191 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9192 		new_readreg = wm_gmii_i82544_readreg;
   9193 		new_writereg = wm_gmii_i82544_writereg;
   9194 	} else {
   9195 		new_readreg = wm_gmii_i82543_readreg;
   9196 		new_writereg = wm_gmii_i82543_writereg;
   9197 	}
   9198 
   9199 	if (new_phytype == WMPHY_BM) {
   9200 		/* All BM use _bm_ */
   9201 		new_readreg = wm_gmii_bm_readreg;
   9202 		new_writereg = wm_gmii_bm_writereg;
   9203 	}
   9204 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9205 		/* All PCH* use _hv_ */
   9206 		new_readreg = wm_gmii_hv_readreg;
   9207 		new_writereg = wm_gmii_hv_writereg;
   9208 	}
   9209 
   9210 	/* Diag output */
   9211 	if (doubt_phytype != WMPHY_UNKNOWN)
   9212 		aprint_error_dev(dev, "Assumed new PHY type was "
   9213 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9214 		    new_phytype);
   9215 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9216 	    && (sc->sc_phytype != new_phytype))
    9217 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9218 		    " was incorrect. New PHY type = %u\n",
   9219 		    sc->sc_phytype, new_phytype);
   9220 
   9221 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9222 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9223 
   9224 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9225 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9226 		    "function was incorrect.\n");
   9227 
   9228 	/* Update now */
   9229 	sc->sc_phytype = new_phytype;
   9230 	mii->mii_readreg = new_readreg;
   9231 	mii->mii_writereg = new_writereg;
   9232 }
   9233 
   9234 /*
   9235  * wm_get_phy_id_82575:
   9236  *
    9237  *	Return the PHY ID. Return -1 if it failed.
   9238  */
   9239 static int
   9240 wm_get_phy_id_82575(struct wm_softc *sc)
   9241 {
   9242 	uint32_t reg;
   9243 	int phyid = -1;
   9244 
   9245 	/* XXX */
   9246 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9247 		return -1;
   9248 
   9249 	if (wm_sgmii_uses_mdio(sc)) {
   9250 		switch (sc->sc_type) {
   9251 		case WM_T_82575:
   9252 		case WM_T_82576:
   9253 			reg = CSR_READ(sc, WMREG_MDIC);
   9254 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9255 			break;
   9256 		case WM_T_82580:
   9257 		case WM_T_I350:
   9258 		case WM_T_I354:
   9259 		case WM_T_I210:
   9260 		case WM_T_I211:
   9261 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9262 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9263 			break;
   9264 		default:
   9265 			return -1;
   9266 		}
   9267 	}
   9268 
   9269 	return phyid;
   9270 }
   9271 
   9272 
   9273 /*
   9274  * wm_gmii_mediainit:
   9275  *
   9276  *	Initialize media for use on 1000BASE-T devices.
   9277  */
   9278 static void
   9279 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9280 {
   9281 	device_t dev = sc->sc_dev;
   9282 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9283 	struct mii_data *mii = &sc->sc_mii;
   9284 	uint32_t reg;
   9285 
   9286 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9287 		device_xname(sc->sc_dev), __func__));
   9288 
   9289 	/* We have GMII. */
   9290 	sc->sc_flags |= WM_F_HAS_MII;
   9291 
   9292 	if (sc->sc_type == WM_T_80003)
    9293 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9294 	else
   9295 		sc->sc_tipg = TIPG_1000T_DFLT;
   9296 
   9297 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9298 	if ((sc->sc_type == WM_T_82580)
   9299 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9300 	    || (sc->sc_type == WM_T_I211)) {
   9301 		reg = CSR_READ(sc, WMREG_PHPM);
   9302 		reg &= ~PHPM_GO_LINK_D;
   9303 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9304 	}
   9305 
   9306 	/*
   9307 	 * Let the chip set speed/duplex on its own based on
   9308 	 * signals from the PHY.
   9309 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9310 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9311 	 */
   9312 	sc->sc_ctrl |= CTRL_SLU;
   9313 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9314 
   9315 	/* Initialize our media structures and probe the GMII. */
   9316 	mii->mii_ifp = ifp;
   9317 
   9318 	/*
    9319 	 * The first call of wm_gmii_setup_phytype. The result might be
   9320 	 * incorrect.
   9321 	 */
   9322 	wm_gmii_setup_phytype(sc, 0, 0);
   9323 
   9324 	mii->mii_statchg = wm_gmii_statchg;
   9325 
    9326 	/* Move PHY control from SMBus to PCIe */
   9327 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9328 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9329 		wm_smbustopci(sc);
   9330 
   9331 	wm_gmii_reset(sc);
   9332 
   9333 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9334 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9335 	    wm_gmii_mediastatus);
   9336 
   9337 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9338 	    || (sc->sc_type == WM_T_82580)
   9339 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9340 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9341 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9342 			/* Attach only one port */
   9343 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9344 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9345 		} else {
   9346 			int i, id;
   9347 			uint32_t ctrl_ext;
   9348 
   9349 			id = wm_get_phy_id_82575(sc);
   9350 			if (id != -1) {
   9351 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9352 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9353 			}
   9354 			if ((id == -1)
   9355 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9356 				/* Power on the SGMII PHY if it is disabled */
   9357 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9358 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9359 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9360 				CSR_WRITE_FLUSH(sc);
   9361 				delay(300*1000); /* XXX too long */
   9362 
    9363 				/* Try PHY addresses 1 through 7 */
   9364 				for (i = 1; i < 8; i++)
   9365 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9366 					    0xffffffff, i, MII_OFFSET_ANY,
   9367 					    MIIF_DOPAUSE);
   9368 
    9369 				/* Restore the previous SFP cage power state */
   9370 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9371 			}
   9372 		}
   9373 	} else {
   9374 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9375 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9376 	}
   9377 
   9378 	/*
   9379 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9380 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9381 	 */
   9382 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9383 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9384 		wm_set_mdio_slow_mode_hv(sc);
   9385 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9386 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9387 	}
   9388 
   9389 	/*
   9390 	 * (For ICH8 variants)
   9391 	 * If PHY detection failed, use BM's r/w function and retry.
   9392 	 */
   9393 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9394 		/* if failed, retry with *_bm_* */
   9395 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9396 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9397 		    sc->sc_phytype);
   9398 		sc->sc_phytype = WMPHY_BM;
   9399 		mii->mii_readreg = wm_gmii_bm_readreg;
   9400 		mii->mii_writereg = wm_gmii_bm_writereg;
   9401 
   9402 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9403 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9404 	}
   9405 
   9406 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9407 		/* No PHY was found */
   9408 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9409 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9410 		sc->sc_phytype = WMPHY_NONE;
   9411 	} else {
   9412 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9413 
   9414 		/*
    9415 		 * PHY found! Check the PHY type again by the second call of
    9416 		 * wm_gmii_setup_phytype.
   9417 		 */
   9418 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9419 		    child->mii_mpd_model);
   9420 
   9421 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9422 	}
   9423 }
   9424 
   9425 /*
   9426  * wm_gmii_mediachange:	[ifmedia interface function]
   9427  *
   9428  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9429  */
   9430 static int
   9431 wm_gmii_mediachange(struct ifnet *ifp)
   9432 {
   9433 	struct wm_softc *sc = ifp->if_softc;
   9434 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9435 	int rc;
   9436 
   9437 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9438 		device_xname(sc->sc_dev), __func__));
   9439 	if ((ifp->if_flags & IFF_UP) == 0)
   9440 		return 0;
   9441 
   9442 	/* Disable D0 LPLU. */
   9443 	wm_lplu_d0_disable(sc);
   9444 
   9445 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9446 	sc->sc_ctrl |= CTRL_SLU;
   9447 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9448 	    || (sc->sc_type > WM_T_82543)) {
   9449 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9450 	} else {
   9451 		sc->sc_ctrl &= ~CTRL_ASDE;
   9452 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9453 		if (ife->ifm_media & IFM_FDX)
   9454 			sc->sc_ctrl |= CTRL_FD;
   9455 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9456 		case IFM_10_T:
   9457 			sc->sc_ctrl |= CTRL_SPEED_10;
   9458 			break;
   9459 		case IFM_100_TX:
   9460 			sc->sc_ctrl |= CTRL_SPEED_100;
   9461 			break;
   9462 		case IFM_1000_T:
   9463 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9464 			break;
   9465 		default:
   9466 			panic("wm_gmii_mediachange: bad media 0x%x",
   9467 			    ife->ifm_media);
   9468 		}
   9469 	}
   9470 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9471 	CSR_WRITE_FLUSH(sc);
   9472 	if (sc->sc_type <= WM_T_82543)
   9473 		wm_gmii_reset(sc);
   9474 
   9475 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9476 		return 0;
   9477 	return rc;
   9478 }
   9479 
   9480 /*
   9481  * wm_gmii_mediastatus:	[ifmedia interface function]
   9482  *
   9483  *	Get the current interface media status on a 1000BASE-T device.
   9484  */
   9485 static void
   9486 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9487 {
   9488 	struct wm_softc *sc = ifp->if_softc;
   9489 
   9490 	ether_mediastatus(ifp, ifmr);
   9491 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9492 	    | sc->sc_flowflags;
   9493 }
   9494 
   9495 #define	MDI_IO		CTRL_SWDPIN(2)
   9496 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9497 #define	MDI_CLK		CTRL_SWDPIN(3)
   9498 
   9499 static void
   9500 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9501 {
   9502 	uint32_t i, v;
   9503 
   9504 	v = CSR_READ(sc, WMREG_CTRL);
   9505 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9506 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9507 
   9508 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9509 		if (data & i)
   9510 			v |= MDI_IO;
   9511 		else
   9512 			v &= ~MDI_IO;
   9513 		CSR_WRITE(sc, WMREG_CTRL, v);
   9514 		CSR_WRITE_FLUSH(sc);
   9515 		delay(10);
   9516 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9517 		CSR_WRITE_FLUSH(sc);
   9518 		delay(10);
   9519 		CSR_WRITE(sc, WMREG_CTRL, v);
   9520 		CSR_WRITE_FLUSH(sc);
   9521 		delay(10);
   9522 	}
   9523 }
   9524 
   9525 static uint32_t
   9526 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9527 {
   9528 	uint32_t v, i, data = 0;
   9529 
   9530 	v = CSR_READ(sc, WMREG_CTRL);
   9531 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9532 	v |= CTRL_SWDPIO(3);
   9533 
   9534 	CSR_WRITE(sc, WMREG_CTRL, v);
   9535 	CSR_WRITE_FLUSH(sc);
   9536 	delay(10);
   9537 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9538 	CSR_WRITE_FLUSH(sc);
   9539 	delay(10);
   9540 	CSR_WRITE(sc, WMREG_CTRL, v);
   9541 	CSR_WRITE_FLUSH(sc);
   9542 	delay(10);
   9543 
   9544 	for (i = 0; i < 16; i++) {
   9545 		data <<= 1;
   9546 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9547 		CSR_WRITE_FLUSH(sc);
   9548 		delay(10);
   9549 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9550 			data |= 1;
   9551 		CSR_WRITE(sc, WMREG_CTRL, v);
   9552 		CSR_WRITE_FLUSH(sc);
   9553 		delay(10);
   9554 	}
   9555 
   9556 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9557 	CSR_WRITE_FLUSH(sc);
   9558 	delay(10);
   9559 	CSR_WRITE(sc, WMREG_CTRL, v);
   9560 	CSR_WRITE_FLUSH(sc);
   9561 	delay(10);
   9562 
   9563 	return data;
   9564 }
   9565 
   9566 #undef MDI_IO
   9567 #undef MDI_DIR
   9568 #undef MDI_CLK
   9569 
   9570 /*
   9571  * wm_gmii_i82543_readreg:	[mii interface function]
   9572  *
   9573  *	Read a PHY register on the GMII (i82543 version).
   9574  */
   9575 static int
   9576 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9577 {
   9578 	struct wm_softc *sc = device_private(dev);
   9579 	int rv;
   9580 
   9581 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9582 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9583 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9584 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9585 
   9586 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9587 	    device_xname(dev), phy, reg, rv));
   9588 
   9589 	return rv;
   9590 }
   9591 
   9592 /*
   9593  * wm_gmii_i82543_writereg:	[mii interface function]
   9594  *
   9595  *	Write a PHY register on the GMII (i82543 version).
   9596  */
   9597 static void
   9598 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9599 {
   9600 	struct wm_softc *sc = device_private(dev);
   9601 
   9602 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9603 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9604 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9605 	    (MII_COMMAND_START << 30), 32);
   9606 }
   9607 
   9608 /*
   9609  * wm_gmii_mdic_readreg:	[mii interface function]
   9610  *
   9611  *	Read a PHY register on the GMII.
   9612  */
   9613 static int
   9614 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9615 {
   9616 	struct wm_softc *sc = device_private(dev);
   9617 	uint32_t mdic = 0;
   9618 	int i, rv;
   9619 
   9620 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9621 	    MDIC_REGADD(reg));
   9622 
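        	/* Poll for completion; the ready bit is sampled every 50us. */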
   9623 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9624 		mdic = CSR_READ(sc, WMREG_MDIC);
   9625 		if (mdic & MDIC_READY)
   9626 			break;
   9627 		delay(50);
   9628 	}
   9629 
   9630 	if ((mdic & MDIC_READY) == 0) {
   9631 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9632 		    device_xname(dev), phy, reg);
   9633 		rv = 0;
   9634 	} else if (mdic & MDIC_E) {
   9635 #if 0 /* This is normal if no PHY is present. */
   9636 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9637 		    device_xname(dev), phy, reg);
   9638 #endif
   9639 		rv = 0;
   9640 	} else {
   9641 		rv = MDIC_DATA(mdic);
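        		/* All-ones usually means no PHY responded; normalize to 0. */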
   9642 		if (rv == 0xffff)
   9643 			rv = 0;
   9644 	}
   9645 
   9646 	return rv;
   9647 }
   9648 
   9649 /*
   9650  * wm_gmii_mdic_writereg:	[mii interface function]
   9651  *
   9652  *	Write a PHY register on the GMII.
   9653  */
   9654 static void
   9655 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9656 {
   9657 	struct wm_softc *sc = device_private(dev);
   9658 	uint32_t mdic = 0;
   9659 	int i;
   9660 
   9661 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9662 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9663 
   9664 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9665 		mdic = CSR_READ(sc, WMREG_MDIC);
   9666 		if (mdic & MDIC_READY)
   9667 			break;
   9668 		delay(50);
   9669 	}
   9670 
   9671 	if ((mdic & MDIC_READY) == 0)
   9672 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9673 		    device_xname(dev), phy, reg);
   9674 	else if (mdic & MDIC_E)
   9675 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9676 		    device_xname(dev), phy, reg);
   9677 }
   9678 
   9679 /*
   9680  * wm_gmii_i82544_readreg:	[mii interface function]
   9681  *
   9682  *	Read a PHY register on the GMII.
   9683  */
   9684 static int
   9685 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9686 {
   9687 	struct wm_softc *sc = device_private(dev);
   9688 	int rv;
   9689 
   9690 	if (sc->phy.acquire(sc)) {
   9691 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9692 		return 0;
   9693 	}
   9694 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   9695 	sc->phy.release(sc);
   9696 
   9697 	return rv;
   9698 }
   9699 
   9700 /*
   9701  * wm_gmii_i82544_writereg:	[mii interface function]
   9702  *
   9703  *	Write a PHY register on the GMII.
   9704  */
   9705 static void
   9706 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9707 {
   9708 	struct wm_softc *sc = device_private(dev);
   9709 
   9710 	if (sc->phy.acquire(sc)) {
   9711 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9712 		return;
   9713 	}
   9714 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   9715 	sc->phy.release(sc);
   9716 }
   9717 
   9718 /*
   9719  * wm_gmii_i80003_readreg:	[mii interface function]
   9720  *
   9721  *	Read a PHY register on the kumeran bus (80003 GG82563 PHY).
   9722  * This could be handled by the PHY layer if we didn't have to lock the
   9723  * resource ...
   9724  */
   9725 static int
   9726 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9727 {
   9728 	struct wm_softc *sc = device_private(dev);
   9729 	int rv;
   9730 
   9731 	if (phy != 1) /* only one PHY on kumeran bus */
   9732 		return 0;
   9733 
   9734 	if (sc->phy.acquire(sc)) {
   9735 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9736 		return 0;
   9737 	}
   9738 
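        	/*
        	 * The page is encoded in the upper bits of 'reg'.  Registers
        	 * below GG82563_MIN_ALT_REG are paged via the normal page
        	 * select register, the rest via the alternate one.
        	 */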
   9739 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9740 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9741 		    reg >> GG82563_PAGE_SHIFT);
   9742 	} else {
   9743 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9744 		    reg >> GG82563_PAGE_SHIFT);
   9745 	}
   9746 	/* Wait an extra 200us to work around an MDIC ready-bit bug */
   9747 	delay(200);
   9748 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9749 	delay(200);
   9750 	sc->phy.release(sc);
   9751 
   9752 	return rv;
   9753 }
   9754 
   9755 /*
   9756  * wm_gmii_i80003_writereg:	[mii interface function]
   9757  *
   9758  *	Write a PHY register on the kumeran bus (80003 GG82563 PHY).
   9759  * This could be handled by the PHY layer if we didn't have to lock the
   9760  * resource ...
   9761  */
   9762 static void
   9763 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   9764 {
   9765 	struct wm_softc *sc = device_private(dev);
   9766 
   9767 	if (phy != 1) /* only one PHY on kumeran bus */
   9768 		return;
   9769 
   9770 	if (sc->phy.acquire(sc)) {
   9771 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9772 		return;
   9773 	}
   9774 
   9775 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9776 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9777 		    reg >> GG82563_PAGE_SHIFT);
   9778 	} else {
   9779 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9780 		    reg >> GG82563_PAGE_SHIFT);
   9781 	}
   9782 	/* Wait an extra 200us to work around an MDIC ready-bit bug */
   9783 	delay(200);
   9784 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9785 	delay(200);
   9786 
   9787 	sc->phy.release(sc);
   9788 }
   9789 
   9790 /*
   9791  * wm_gmii_bm_readreg:	[mii interface function]
   9792  *
   9793  *	Read a PHY register on the BM PHY.
   9794  * This could be handled by the PHY layer if we didn't have to lock the
   9795  * resource ...
   9796  */
   9797 static int
   9798 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   9799 {
   9800 	struct wm_softc *sc = device_private(dev);
   9801 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9802 	uint16_t val;
   9803 	int rv;
   9804 
   9805 	if (sc->phy.acquire(sc)) {
   9806 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9807 		return 0;
   9808 	}
   9809 
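        	/*
        	 * Except on 82574/82583, registers on pages >= 768 and a few
        	 * special registers live on PHY address 1.
        	 */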
   9810 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9811 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9812 		    || (reg == 31)) ? 1 : phy;
   9813 	/* Page 800 works differently than the rest so it has its own func */
   9814 	if (page == BM_WUC_PAGE) {
   9815 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   9816 		rv = val;
   9817 		goto release;
   9818 	}
   9819 
   9820 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9821 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9822 		    && (sc->sc_type != WM_T_82583))
   9823 			wm_gmii_mdic_writereg(dev, phy,
   9824 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9825 		else
   9826 			wm_gmii_mdic_writereg(dev, phy,
   9827 			    BME1000_PHY_PAGE_SELECT, page);
   9828 	}
   9829 
   9830 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9831 
   9832 release:
   9833 	sc->phy.release(sc);
   9834 	return rv;
   9835 }
   9836 
   9837 /*
   9838  * wm_gmii_bm_writereg:	[mii interface function]
   9839  *
   9840  *	Write a PHY register on the BM PHY.
   9841  * This could be handled by the PHY layer if we didn't have to lock the
   9842  * resource ...
   9843  */
   9844 static void
   9845 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   9846 {
   9847 	struct wm_softc *sc = device_private(dev);
   9848 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9849 
   9850 	if (sc->phy.acquire(sc)) {
   9851 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9852 		return;
   9853 	}
   9854 
   9855 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9856 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9857 		    || (reg == 31)) ? 1 : phy;
   9858 	/* Page 800 works differently than the rest so it has its own func */
   9859 	if (page == BM_WUC_PAGE) {
   9860 		uint16_t tmp;
   9861 
   9862 		tmp = val;
   9863 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   9864 		goto release;
   9865 	}
   9866 
   9867 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9868 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9869 		    && (sc->sc_type != WM_T_82583))
   9870 			wm_gmii_mdic_writereg(dev, phy,
   9871 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9872 		else
   9873 			wm_gmii_mdic_writereg(dev, phy,
   9874 			    BME1000_PHY_PAGE_SELECT, page);
   9875 	}
   9876 
   9877 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9878 
   9879 release:
   9880 	sc->phy.release(sc);
   9881 }
   9882 
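        /*
         * wm_access_phy_wakeup_reg_bm:
         *
         *	Read (rd != 0) or write a BM PHY wakeup register.  Wakeup
         *	registers live on page 800 and are accessed indirectly:
         *	enable wakeup register access on page 769, pass the register
         *	number and data through the WUC address/data opcode
         *	registers, then restore the original enable bits.
         */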
   9883 static void
   9884 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   9885 {
   9886 	struct wm_softc *sc = device_private(dev);
   9887 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9888 	uint16_t wuce, reg;
   9889 
   9890 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9891 		device_xname(dev), __func__));
   9892 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9893 	if (sc->sc_type == WM_T_PCH) {
   9894 		/* XXX The e1000 driver does nothing here... why? */
   9895 	}
   9896 
   9897 	/*
   9898 	 * 1) Enable PHY wakeup register first.
   9899 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9900 	 */
   9901 
   9902 	/* Set page 769 */
   9903 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   9904 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9905 
   9906 	/* Read WUCE and save it */
   9907 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   9908 
   9909 	reg = wuce | BM_WUC_ENABLE_BIT;
   9910 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9911 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   9912 
   9913 	/* Select page 800 */
   9914 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   9915 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9916 
   9917 	/*
   9918 	 * 2) Access PHY wakeup register.
   9919 	 * See e1000_access_phy_wakeup_reg_bm.
   9920 	 */
   9921 
   9922 	/* Write page 800 */
   9923 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9924 
   9925 	if (rd)
   9926 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   9927 	else
   9928 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   9929 
   9930 	/*
   9931 	 * 3) Disable PHY wakeup register.
   9932 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9933 	 */
   9934 	/* Set page 769 */
   9935 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   9936 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9937 
   9938 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   9939 }
   9940 
   9941 /*
   9942  * wm_gmii_hv_readreg:	[mii interface function]
   9943  *
   9944  *	Read a PHY register on the HV (PCH) PHY.
   9945  * This could be handled by the PHY layer if we didn't have to lock the
   9946  * resource ...
   9947  */
   9948 static int
   9949 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   9950 {
   9951 	struct wm_softc *sc = device_private(dev);
   9952 	int rv;
   9953 
   9954 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9955 		device_xname(dev), __func__));
   9956 	if (sc->phy.acquire(sc)) {
   9957 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9958 		return 0;
   9959 	}
   9960 
   9961 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   9962 	sc->phy.release(sc);
   9963 	return rv;
   9964 }
   9965 
   9966 static int
   9967 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   9968 {
   9969 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9970 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9971 	uint16_t val;
   9972 	int rv;
   9973 
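        	/* Pages >= HV_INTC_FC_PAGE_START live on PHY address 1. */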
   9974 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9975 
   9976 	/* Page 800 works differently than the rest so it has its own func */
   9977 	if (page == BM_WUC_PAGE) {
   9978 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   9979 		return val;
   9980 	}
   9981 
   9982 	/*
   9983 	 * Pages below 768 work differently than the rest and would need
   9984 	 * their own function; they are not handled here.
   9985 	 */
   9986 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9987 		printf("gmii_hv_readreg!!!\n");
   9988 		return 0;
   9989 	}
   9990 
   9991 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9992 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   9993 		    page << BME1000_PAGE_SHIFT);
   9994 	}
   9995 
   9996 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   9997 	return rv;
   9998 }
   9999 
   10000 /*
   10001  * wm_gmii_hv_writereg:	[mii interface function]
   10002  *
  10003  *	Write a PHY register on the HV (PCH) PHY.
  10004  * This could be handled by the PHY layer if we didn't have to lock the
  10005  * resource ...
   10006  */
   10007 static void
   10008 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10009 {
   10010 	struct wm_softc *sc = device_private(dev);
   10011 
   10012 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10013 		device_xname(dev), __func__));
   10014 
   10015 	if (sc->phy.acquire(sc)) {
   10016 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10017 		return;
   10018 	}
   10019 
   10020 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10021 	sc->phy.release(sc);
   10022 }
   10023 
   10024 static void
   10025 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10026 {
   10027 	struct wm_softc *sc = device_private(dev);
   10028 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10029 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10030 
   10031 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10032 
   10033 	/* Page 800 works differently than the rest so it has its own func */
   10034 	if (page == BM_WUC_PAGE) {
   10035 		uint16_t tmp;
   10036 
   10037 		tmp = val;
   10038 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10039 		return;
   10040 	}
   10041 
   10042 	/*
  10043 	 * Pages below 768 work differently than the rest and would need
  10044 	 * their own function; they are not handled here.
   10045 	 */
   10046 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10047 		printf("gmii_hv_writereg!!!\n");
   10048 		return;
   10049 	}
   10050 
   10051 	{
   10052 		/*
   10053 		 * XXX Workaround MDIO accesses being disabled after entering
   10054 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10055 		 * register is set)
   10056 		 */
   10057 		if (sc->sc_phytype == WMPHY_82578) {
   10058 			struct mii_softc *child;
   10059 
   10060 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10061 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10062 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10063 			    && ((val & (1 << 11)) != 0)) {
   10064 				printf("XXX need workaround\n");
   10065 			}
   10066 		}
   10067 
   10068 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10069 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10070 			    page << BME1000_PAGE_SHIFT);
   10071 		}
   10072 	}
   10073 
   10074 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10075 }
   10076 
   10077 /*
   10078  * wm_gmii_82580_readreg:	[mii interface function]
   10079  *
   10080  *	Read a PHY register on the 82580 and I350.
   10081  * This could be handled by the PHY layer if we didn't have to lock the
  10082  * resource ...
   10083  */
   10084 static int
   10085 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10086 {
   10087 	struct wm_softc *sc = device_private(dev);
   10088 	int rv;
   10089 
   10090 	if (sc->phy.acquire(sc) != 0) {
   10091 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10092 		return 0;
   10093 	}
   10094 
   10095 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10096 
   10097 	sc->phy.release(sc);
   10098 	return rv;
   10099 }
   10100 
   10101 /*
   10102  * wm_gmii_82580_writereg:	[mii interface function]
   10103  *
   10104  *	Write a PHY register on the 82580 and I350.
   10105  * This could be handled by the PHY layer if we didn't have to lock the
  10106  * resource ...
   10107  */
   10108 static void
   10109 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10110 {
   10111 	struct wm_softc *sc = device_private(dev);
   10112 
   10113 	if (sc->phy.acquire(sc) != 0) {
   10114 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10115 		return;
   10116 	}
   10117 
   10118 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10119 
   10120 	sc->phy.release(sc);
   10121 }
   10122 
   10123 /*
   10124  * wm_gmii_gs40g_readreg:	[mii interface function]
   10125  *
  10126  *	Read a PHY register on the I210 and I211.
  10127  * This could be handled by the PHY layer if we didn't have to lock the
  10128  * resource ...
   10129  */
   10130 static int
   10131 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10132 {
   10133 	struct wm_softc *sc = device_private(dev);
   10134 	int page, offset;
   10135 	int rv;
   10136 
   10137 	/* Acquire semaphore */
   10138 	if (sc->phy.acquire(sc)) {
   10139 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10140 		return 0;
   10141 	}
   10142 
   10143 	/* Page select */
   10144 	page = reg >> GS40G_PAGE_SHIFT;
   10145 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10146 
   10147 	/* Read reg */
   10148 	offset = reg & GS40G_OFFSET_MASK;
   10149 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10150 
   10151 	sc->phy.release(sc);
   10152 	return rv;
   10153 }
   10154 
   10155 /*
   10156  * wm_gmii_gs40g_writereg:	[mii interface function]
   10157  *
   10158  *	Write a PHY register on the I210 and I211.
   10159  * This could be handled by the PHY layer if we didn't have to lock the
  10160  * resource ...
   10161  */
   10162 static void
   10163 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10164 {
   10165 	struct wm_softc *sc = device_private(dev);
   10166 	int page, offset;
   10167 
   10168 	/* Acquire semaphore */
   10169 	if (sc->phy.acquire(sc)) {
   10170 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10171 		return;
   10172 	}
   10173 
   10174 	/* Page select */
   10175 	page = reg >> GS40G_PAGE_SHIFT;
   10176 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10177 
   10178 	/* Write reg */
   10179 	offset = reg & GS40G_OFFSET_MASK;
   10180 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10181 
   10182 	/* Release semaphore */
   10183 	sc->phy.release(sc);
   10184 }
   10185 
   10186 /*
   10187  * wm_gmii_statchg:	[mii interface function]
   10188  *
   10189  *	Callback from MII layer when media changes.
   10190  */
   10191 static void
   10192 wm_gmii_statchg(struct ifnet *ifp)
   10193 {
   10194 	struct wm_softc *sc = ifp->if_softc;
   10195 	struct mii_data *mii = &sc->sc_mii;
   10196 
   10197 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10198 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10199 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10200 
   10201 	/*
   10202 	 * Get flow control negotiation result.
   10203 	 */
   10204 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10205 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10206 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10207 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10208 	}
   10209 
   10210 	if (sc->sc_flowflags & IFM_FLOW) {
   10211 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10212 			sc->sc_ctrl |= CTRL_TFCE;
   10213 			sc->sc_fcrtl |= FCRTL_XONE;
   10214 		}
   10215 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10216 			sc->sc_ctrl |= CTRL_RFCE;
   10217 	}
   10218 
   10219 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10220 		DPRINTF(WM_DEBUG_LINK,
   10221 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10222 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10223 	} else {
   10224 		DPRINTF(WM_DEBUG_LINK,
   10225 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10226 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10227 	}
   10228 
   10229 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10230 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10231 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10232 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10233 	if (sc->sc_type == WM_T_80003) {
   10234 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10235 		case IFM_1000_T:
   10236 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10237 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10238 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10239 			break;
   10240 		default:
   10241 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10242 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10243 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10244 			break;
   10245 		}
   10246 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10247 	}
   10248 }
   10249 
   10250 /* kumeran related (80003, ICH* and PCH*) */
   10251 
   10252 /*
   10253  * wm_kmrn_readreg:
   10254  *
   10255  *	Read a kumeran register
   10256  */
   10257 static int
   10258 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10259 {
   10260 	int rv;
   10261 
   10262 	if (sc->sc_type == WM_T_80003)
   10263 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10264 	else
   10265 		rv = sc->phy.acquire(sc);
   10266 	if (rv != 0) {
   10267 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10268 		    __func__);
   10269 		return 0;
   10270 	}
   10271 
   10272 	rv = wm_kmrn_readreg_locked(sc, reg);
   10273 
   10274 	if (sc->sc_type == WM_T_80003)
   10275 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10276 	else
   10277 		sc->phy.release(sc);
   10278 
   10279 	return rv;
   10280 }
   10281 
   10282 static int
   10283 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10284 {
   10285 	int rv;
   10286 
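        	/*
        	 * Writing the offset with REN set starts a read cycle; the
        	 * result appears in the low bits of KUMCTRLSTA shortly after.
        	 */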
   10287 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10288 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10289 	    KUMCTRLSTA_REN);
   10290 	CSR_WRITE_FLUSH(sc);
   10291 	delay(2);
   10292 
   10293 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10294 
   10295 	return rv;
   10296 }
   10297 
   10298 /*
   10299  * wm_kmrn_writereg:
   10300  *
   10301  *	Write a kumeran register
   10302  */
   10303 static void
   10304 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10305 {
   10306 	int rv;
   10307 
   10308 	if (sc->sc_type == WM_T_80003)
   10309 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10310 	else
   10311 		rv = sc->phy.acquire(sc);
   10312 	if (rv != 0) {
   10313 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10314 		    __func__);
   10315 		return;
   10316 	}
   10317 
   10318 	wm_kmrn_writereg_locked(sc, reg, val);
   10319 
   10320 	if (sc->sc_type == WM_T_80003)
   10321 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10322 	else
   10323 		sc->phy.release(sc);
   10324 }
   10325 
   10326 static void
   10327 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10328 {
   10329 
   10330 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10331 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10332 	    (val & KUMCTRLSTA_MASK));
   10333 }
   10334 
   10335 /* SGMII related */
   10336 
   10337 /*
   10338  * wm_sgmii_uses_mdio
   10339  *
   10340  * Check whether the transaction is to the internal PHY or the external
   10341  * MDIO interface. Return true if it's MDIO.
   10342  */
   10343 static bool
   10344 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10345 {
   10346 	uint32_t reg;
   10347 	bool ismdio = false;
   10348 
   10349 	switch (sc->sc_type) {
   10350 	case WM_T_82575:
   10351 	case WM_T_82576:
   10352 		reg = CSR_READ(sc, WMREG_MDIC);
   10353 		ismdio = ((reg & MDIC_DEST) != 0);
   10354 		break;
   10355 	case WM_T_82580:
   10356 	case WM_T_I350:
   10357 	case WM_T_I354:
   10358 	case WM_T_I210:
   10359 	case WM_T_I211:
   10360 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10361 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10362 		break;
   10363 	default:
   10364 		break;
   10365 	}
   10366 
   10367 	return ismdio;
   10368 }
   10369 
   10370 /*
   10371  * wm_sgmii_readreg:	[mii interface function]
   10372  *
   10373  *	Read a PHY register on the SGMII
   10374  * This could be handled by the PHY layer if we didn't have to lock the
  10375  * resource ...
   10376  */
   10377 static int
   10378 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10379 {
   10380 	struct wm_softc *sc = device_private(dev);
   10381 	uint32_t i2ccmd;
   10382 	int i, rv;
   10383 
   10384 	if (sc->phy.acquire(sc)) {
   10385 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10386 		return 0;
   10387 	}
   10388 
   10389 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10390 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10391 	    | I2CCMD_OPCODE_READ;
   10392 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10393 
   10394 	/* Poll the ready bit */
   10395 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10396 		delay(50);
   10397 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10398 		if (i2ccmd & I2CCMD_READY)
   10399 			break;
   10400 	}
   10401 	if ((i2ccmd & I2CCMD_READY) == 0)
   10402 		device_printf(dev, "I2CCMD Read did not complete\n");
   10403 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10404 		device_printf(dev, "I2CCMD Error bit set\n");
   10405 
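        	/* The I2C interface returns the data byte-swapped; swap it back. */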
   10406 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10407 
   10408 	sc->phy.release(sc);
   10409 	return rv;
   10410 }
   10411 
   10412 /*
   10413  * wm_sgmii_writereg:	[mii interface function]
   10414  *
   10415  *	Write a PHY register on the SGMII.
   10416  * This could be handled by the PHY layer if we didn't have to lock the
  10417  * resource ...
   10418  */
   10419 static void
   10420 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10421 {
   10422 	struct wm_softc *sc = device_private(dev);
   10423 	uint32_t i2ccmd;
   10424 	int i;
   10425 	int val_swapped;
   10426 
   10427 	if (sc->phy.acquire(sc) != 0) {
   10428 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10429 		return;
   10430 	}
   10431 	/* Swap the data bytes for the I2C interface */
   10432 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10433 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10434 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10435 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10436 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10437 
   10438 	/* Poll the ready bit */
   10439 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10440 		delay(50);
   10441 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10442 		if (i2ccmd & I2CCMD_READY)
   10443 			break;
   10444 	}
   10445 	if ((i2ccmd & I2CCMD_READY) == 0)
   10446 		device_printf(dev, "I2CCMD Write did not complete\n");
   10447 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10448 		device_printf(dev, "I2CCMD Error bit set\n");
   10449 
   10450 	sc->phy.release(sc);
   10451 }
   10452 
   10453 /* TBI related */
   10454 
   10455 /*
   10456  * wm_tbi_mediainit:
   10457  *
   10458  *	Initialize media for use on 1000BASE-X devices.
   10459  */
   10460 static void
   10461 wm_tbi_mediainit(struct wm_softc *sc)
   10462 {
   10463 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10464 	const char *sep = "";
   10465 
   10466 	if (sc->sc_type < WM_T_82543)
   10467 		sc->sc_tipg = TIPG_WM_DFLT;
   10468 	else
   10469 		sc->sc_tipg = TIPG_LG_DFLT;
   10470 
   10471 	sc->sc_tbi_serdes_anegticks = 5;
   10472 
   10473 	/* Initialize our media structures */
   10474 	sc->sc_mii.mii_ifp = ifp;
   10475 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10476 
   10477 	if ((sc->sc_type >= WM_T_82575)
   10478 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10479 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10480 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10481 	else
   10482 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10483 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10484 
   10485 	/*
   10486 	 * SWD Pins:
   10487 	 *
   10488 	 *	0 = Link LED (output)
   10489 	 *	1 = Loss Of Signal (input)
   10490 	 */
   10491 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10492 
   10493 	/* XXX Perhaps this is only for TBI */
   10494 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10495 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10496 
   10497 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10498 		sc->sc_ctrl &= ~CTRL_LRST;
   10499 
   10500 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10501 
   10502 #define	ADD(ss, mm, dd)							\
   10503 do {									\
   10504 	aprint_normal("%s%s", sep, ss);					\
   10505 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10506 	sep = ", ";							\
   10507 } while (/*CONSTCOND*/0)
   10508 
   10509 	aprint_normal_dev(sc->sc_dev, "");
   10510 
   10511 	if (sc->sc_type == WM_T_I354) {
   10512 		uint32_t status;
   10513 
   10514 		status = CSR_READ(sc, WMREG_STATUS);
   10515 		if (((status & STATUS_2P5_SKU) != 0)
   10516 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10517 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10518 		} else
   10519 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10520 	} else if (sc->sc_type == WM_T_82545) {
   10521 		/* Only 82545 is LX (XXX except SFP) */
   10522 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10523 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10524 	} else {
   10525 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10526 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10527 	}
   10528 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10529 	aprint_normal("\n");
   10530 
   10531 #undef ADD
   10532 
   10533 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10534 }
   10535 
   10536 /*
   10537  * wm_tbi_mediachange:	[ifmedia interface function]
   10538  *
   10539  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10540  */
   10541 static int
   10542 wm_tbi_mediachange(struct ifnet *ifp)
   10543 {
   10544 	struct wm_softc *sc = ifp->if_softc;
   10545 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10546 	uint32_t status;
   10547 	int i;
   10548 
   10549 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10550 		/* XXX need some work for >= 82571 and < 82575 */
   10551 		if (sc->sc_type < WM_T_82575)
   10552 			return 0;
   10553 	}
   10554 
   10555 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10556 	    || (sc->sc_type >= WM_T_82575))
   10557 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10558 
   10559 	sc->sc_ctrl &= ~CTRL_LRST;
   10560 	sc->sc_txcw = TXCW_ANE;
   10561 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10562 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10563 	else if (ife->ifm_media & IFM_FDX)
   10564 		sc->sc_txcw |= TXCW_FD;
   10565 	else
   10566 		sc->sc_txcw |= TXCW_HD;
   10567 
   10568 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10569 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10570 
   10571 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10572 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10573 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10574 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10575 	CSR_WRITE_FLUSH(sc);
   10576 	delay(1000);
   10577 
   10578 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10579 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10580 
   10581 	/*
  10582 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if
  10583 	 * the optics detect a signal, and clear if they don't.
   10584 	 */
   10585 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10586 		/* Have signal; wait for the link to come up. */
   10587 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10588 			delay(10000);
   10589 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10590 				break;
   10591 		}
   10592 
   10593 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10594 			    device_xname(sc->sc_dev),i));
   10595 
   10596 		status = CSR_READ(sc, WMREG_STATUS);
   10597 		DPRINTF(WM_DEBUG_LINK,
   10598 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10599 			device_xname(sc->sc_dev),status, STATUS_LU));
   10600 		if (status & STATUS_LU) {
   10601 			/* Link is up. */
   10602 			DPRINTF(WM_DEBUG_LINK,
   10603 			    ("%s: LINK: set media -> link up %s\n",
   10604 			    device_xname(sc->sc_dev),
   10605 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10606 
   10607 			/*
  10608 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
  10609 			 * automatically, so we should update sc->sc_ctrl.
   10610 			 */
   10611 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10612 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10613 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10614 			if (status & STATUS_FD)
   10615 				sc->sc_tctl |=
   10616 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10617 			else
   10618 				sc->sc_tctl |=
   10619 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10620 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10621 				sc->sc_fcrtl |= FCRTL_XONE;
   10622 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10623 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10624 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10625 				      sc->sc_fcrtl);
   10626 			sc->sc_tbi_linkup = 1;
   10627 		} else {
   10628 			if (i == WM_LINKUP_TIMEOUT)
   10629 				wm_check_for_link(sc);
   10630 			/* Link is down. */
   10631 			DPRINTF(WM_DEBUG_LINK,
   10632 			    ("%s: LINK: set media -> link down\n",
   10633 			    device_xname(sc->sc_dev)));
   10634 			sc->sc_tbi_linkup = 0;
   10635 		}
   10636 	} else {
   10637 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10638 		    device_xname(sc->sc_dev)));
   10639 		sc->sc_tbi_linkup = 0;
   10640 	}
   10641 
   10642 	wm_tbi_serdes_set_linkled(sc);
   10643 
   10644 	return 0;
   10645 }
   10646 
   10647 /*
   10648  * wm_tbi_mediastatus:	[ifmedia interface function]
   10649  *
   10650  *	Get the current interface media status on a 1000BASE-X device.
   10651  */
   10652 static void
   10653 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10654 {
   10655 	struct wm_softc *sc = ifp->if_softc;
   10656 	uint32_t ctrl, status;
   10657 
   10658 	ifmr->ifm_status = IFM_AVALID;
   10659 	ifmr->ifm_active = IFM_ETHER;
   10660 
   10661 	status = CSR_READ(sc, WMREG_STATUS);
   10662 	if ((status & STATUS_LU) == 0) {
   10663 		ifmr->ifm_active |= IFM_NONE;
   10664 		return;
   10665 	}
   10666 
   10667 	ifmr->ifm_status |= IFM_ACTIVE;
   10668 	/* Only 82545 is LX */
   10669 	if (sc->sc_type == WM_T_82545)
   10670 		ifmr->ifm_active |= IFM_1000_LX;
   10671 	else
   10672 		ifmr->ifm_active |= IFM_1000_SX;
   10673 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10674 		ifmr->ifm_active |= IFM_FDX;
   10675 	else
   10676 		ifmr->ifm_active |= IFM_HDX;
   10677 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10678 	if (ctrl & CTRL_RFCE)
   10679 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10680 	if (ctrl & CTRL_TFCE)
   10681 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10682 }
   10683 
   10684 /* XXX TBI only */
   10685 static int
   10686 wm_check_for_link(struct wm_softc *sc)
   10687 {
   10688 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10689 	uint32_t rxcw;
   10690 	uint32_t ctrl;
   10691 	uint32_t status;
   10692 	uint32_t sig;
   10693 
   10694 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10695 		/* XXX need some work for >= 82571 */
   10696 		if (sc->sc_type >= WM_T_82571) {
   10697 			sc->sc_tbi_linkup = 1;
   10698 			return 0;
   10699 		}
   10700 	}
   10701 
   10702 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10703 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10704 	status = CSR_READ(sc, WMREG_STATUS);
   10705 
   10706 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10707 
   10708 	DPRINTF(WM_DEBUG_LINK,
   10709 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10710 		device_xname(sc->sc_dev), __func__,
   10711 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10712 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10713 
   10714 	/*
   10715 	 * SWDPIN   LU RXCW
   10716 	 *      0    0    0
   10717 	 *      0    0    1	(should not happen)
   10718 	 *      0    1    0	(should not happen)
   10719 	 *      0    1    1	(should not happen)
   10720 	 *      1    0    0	Disable autonego and force linkup
   10721 	 *      1    0    1	got /C/ but not linkup yet
   10722 	 *      1    1    0	(linkup)
   10723 	 *      1    1    1	If IFM_AUTO, back to autonego
   10724 	 *
   10725 	 */
   10726 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10727 	    && ((status & STATUS_LU) == 0)
   10728 	    && ((rxcw & RXCW_C) == 0)) {
   10729 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10730 			__func__));
   10731 		sc->sc_tbi_linkup = 0;
   10732 		/* Disable auto-negotiation in the TXCW register */
   10733 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10734 
   10735 		/*
   10736 		 * Force link-up and also force full-duplex.
   10737 		 *
  10738 		 * NOTE: The hardware updates TFCE and RFCE in CTRL
  10739 		 * automatically, so we should update sc->sc_ctrl.
   10740 		 */
   10741 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10742 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10743 	} else if (((status & STATUS_LU) != 0)
   10744 	    && ((rxcw & RXCW_C) != 0)
   10745 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10746 		sc->sc_tbi_linkup = 1;
   10747 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10748 			__func__));
   10749 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10750 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10751 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10752 	    && ((rxcw & RXCW_C) != 0)) {
   10753 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10754 	} else {
   10755 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10756 			status));
   10757 	}
   10758 
   10759 	return 0;
   10760 }
   10761 
   10762 /*
   10763  * wm_tbi_tick:
   10764  *
   10765  *	Check the link on TBI devices.
   10766  *	This function acts as mii_tick().
   10767  */
   10768 static void
   10769 wm_tbi_tick(struct wm_softc *sc)
   10770 {
   10771 	struct mii_data *mii = &sc->sc_mii;
   10772 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10773 	uint32_t status;
   10774 
   10775 	KASSERT(WM_CORE_LOCKED(sc));
   10776 
   10777 	status = CSR_READ(sc, WMREG_STATUS);
   10778 
   10779 	/* XXX is this needed? */
   10780 	(void)CSR_READ(sc, WMREG_RXCW);
   10781 	(void)CSR_READ(sc, WMREG_CTRL);
   10782 
   10783 	/* set link status */
   10784 	if ((status & STATUS_LU) == 0) {
   10785 		DPRINTF(WM_DEBUG_LINK,
   10786 		    ("%s: LINK: checklink -> down\n",
   10787 			device_xname(sc->sc_dev)));
   10788 		sc->sc_tbi_linkup = 0;
   10789 	} else if (sc->sc_tbi_linkup == 0) {
   10790 		DPRINTF(WM_DEBUG_LINK,
   10791 		    ("%s: LINK: checklink -> up %s\n",
   10792 			device_xname(sc->sc_dev),
   10793 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10794 		sc->sc_tbi_linkup = 1;
   10795 		sc->sc_tbi_serdes_ticks = 0;
   10796 	}
   10797 
   10798 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10799 		goto setled;
   10800 
   10801 	if ((status & STATUS_LU) == 0) {
   10802 		sc->sc_tbi_linkup = 0;
   10803 		/* If the timer expired, retry autonegotiation */
   10804 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10805 		    && (++sc->sc_tbi_serdes_ticks
   10806 			>= sc->sc_tbi_serdes_anegticks)) {
   10807 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10808 			sc->sc_tbi_serdes_ticks = 0;
   10809 			/*
   10810 			 * Reset the link, and let autonegotiation do
   10811 			 * its thing
   10812 			 */
   10813 			sc->sc_ctrl |= CTRL_LRST;
   10814 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10815 			CSR_WRITE_FLUSH(sc);
   10816 			delay(1000);
   10817 			sc->sc_ctrl &= ~CTRL_LRST;
   10818 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10819 			CSR_WRITE_FLUSH(sc);
   10820 			delay(1000);
   10821 			CSR_WRITE(sc, WMREG_TXCW,
   10822 			    sc->sc_txcw & ~TXCW_ANE);
   10823 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10824 		}
   10825 	}
   10826 
   10827 setled:
   10828 	wm_tbi_serdes_set_linkled(sc);
   10829 }
   10830 
   10831 /* SERDES related */
   10832 static void
   10833 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10834 {
   10835 	uint32_t reg;
   10836 
   10837 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10838 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10839 		return;
   10840 
   10841 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10842 	reg |= PCS_CFG_PCS_EN;
   10843 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10844 
   10845 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10846 	reg &= ~CTRL_EXT_SWDPIN(3);
   10847 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10848 	CSR_WRITE_FLUSH(sc);
   10849 }
   10850 
   10851 static int
   10852 wm_serdes_mediachange(struct ifnet *ifp)
   10853 {
   10854 	struct wm_softc *sc = ifp->if_softc;
   10855 	bool pcs_autoneg = true; /* XXX */
   10856 	uint32_t ctrl_ext, pcs_lctl, reg;
   10857 
   10858 	/* XXX Currently, this function is not called on 8257[12] */
   10859 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10860 	    || (sc->sc_type >= WM_T_82575))
   10861 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10862 
   10863 	wm_serdes_power_up_link_82575(sc);
   10864 
   10865 	sc->sc_ctrl |= CTRL_SLU;
   10866 
   10867 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10868 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10869 
   10870 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10871 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10872 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10873 	case CTRL_EXT_LINK_MODE_SGMII:
   10874 		pcs_autoneg = true;
   10875 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10876 		break;
   10877 	case CTRL_EXT_LINK_MODE_1000KX:
   10878 		pcs_autoneg = false;
   10879 		/* FALLTHROUGH */
   10880 	default:
   10881 		if ((sc->sc_type == WM_T_82575)
   10882 		    || (sc->sc_type == WM_T_82576)) {
   10883 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10884 				pcs_autoneg = false;
   10885 		}
   10886 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10887 		    | CTRL_FRCFDX;
   10888 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10889 	}
   10890 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10891 
   10892 	if (pcs_autoneg) {
   10893 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10894 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10895 
   10896 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
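        		/* Advertise both symmetric and asymmetric pause. */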
   10897 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10898 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10899 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10900 	} else
   10901 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10902 
   10903 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
  10904 
   10906 	return 0;
   10907 }
   10908 
   10909 static void
   10910 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10911 {
   10912 	struct wm_softc *sc = ifp->if_softc;
   10913 	struct mii_data *mii = &sc->sc_mii;
   10914 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10915 	uint32_t pcs_adv, pcs_lpab, reg;
   10916 
   10917 	ifmr->ifm_status = IFM_AVALID;
   10918 	ifmr->ifm_active = IFM_ETHER;
   10919 
   10920 	/* Check PCS */
   10921 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10922 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10923 		ifmr->ifm_active |= IFM_NONE;
   10924 		sc->sc_tbi_linkup = 0;
   10925 		goto setled;
   10926 	}
   10927 
   10928 	sc->sc_tbi_linkup = 1;
   10929 	ifmr->ifm_status |= IFM_ACTIVE;
   10930 	if (sc->sc_type == WM_T_I354) {
   10931 		uint32_t status;
   10932 
   10933 		status = CSR_READ(sc, WMREG_STATUS);
   10934 		if (((status & STATUS_2P5_SKU) != 0)
   10935 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10936 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10937 		} else
   10938 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10939 	} else {
   10940 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10941 		case PCS_LSTS_SPEED_10:
   10942 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10943 			break;
   10944 		case PCS_LSTS_SPEED_100:
   10945 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10946 			break;
   10947 		case PCS_LSTS_SPEED_1000:
   10948 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10949 			break;
   10950 		default:
   10951 			device_printf(sc->sc_dev, "Unknown speed\n");
   10952 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10953 			break;
   10954 		}
   10955 	}
   10956 	if ((reg & PCS_LSTS_FDX) != 0)
   10957 		ifmr->ifm_active |= IFM_FDX;
   10958 	else
   10959 		ifmr->ifm_active |= IFM_HDX;
   10960 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10961 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10962 		/* Check flow */
   10963 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10964 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10965 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10966 			goto setled;
   10967 		}
   10968 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10969 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10970 		DPRINTF(WM_DEBUG_LINK,
   10971 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
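        		/*
        		 * Resolve pause per IEEE 802.3 Annex 28B: symmetric
        		 * pause if both sides advertise it, otherwise TX- or
        		 * RX-only pause when the SYM/ASYM combinations pair up.
        		 */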
   10972 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10973 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10974 			mii->mii_media_active |= IFM_FLOW
   10975 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10976 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10977 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10978 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10979 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10980 			mii->mii_media_active |= IFM_FLOW
   10981 			    | IFM_ETH_TXPAUSE;
   10982 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10983 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10984 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10985 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10986 			mii->mii_media_active |= IFM_FLOW
   10987 			    | IFM_ETH_RXPAUSE;
   10988 		}
   10989 	}
   10990 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10991 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10992 setled:
   10993 	wm_tbi_serdes_set_linkled(sc);
   10994 }
   10995 
   10996 /*
   10997  * wm_serdes_tick:
   10998  *
   10999  *	Check the link on serdes devices.
   11000  */
   11001 static void
   11002 wm_serdes_tick(struct wm_softc *sc)
   11003 {
   11004 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11005 	struct mii_data *mii = &sc->sc_mii;
   11006 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11007 	uint32_t reg;
   11008 
   11009 	KASSERT(WM_CORE_LOCKED(sc));
   11010 
   11011 	mii->mii_media_status = IFM_AVALID;
   11012 	mii->mii_media_active = IFM_ETHER;
   11013 
   11014 	/* Check PCS */
   11015 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11016 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11017 		mii->mii_media_status |= IFM_ACTIVE;
   11018 		sc->sc_tbi_linkup = 1;
   11019 		sc->sc_tbi_serdes_ticks = 0;
   11020 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11021 		if ((reg & PCS_LSTS_FDX) != 0)
   11022 			mii->mii_media_active |= IFM_FDX;
   11023 		else
   11024 			mii->mii_media_active |= IFM_HDX;
   11025 	} else {
   11026 		mii->mii_media_status |= IFM_NONE;
   11027 		sc->sc_tbi_linkup = 0;
   11028 		/* If the timer expired, retry autonegotiation */
   11029 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11030 		    && (++sc->sc_tbi_serdes_ticks
   11031 			>= sc->sc_tbi_serdes_anegticks)) {
   11032 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11033 			sc->sc_tbi_serdes_ticks = 0;
   11034 			/* XXX */
   11035 			wm_serdes_mediachange(ifp);
   11036 		}
   11037 	}
   11038 
   11039 	wm_tbi_serdes_set_linkled(sc);
   11040 }
   11041 
   11042 /* SFP related */
   11043 
   11044 static int
   11045 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11046 {
   11047 	uint32_t i2ccmd;
   11048 	int i;
   11049 
   11050 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11051 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11052 
   11053 	/* Poll the ready bit */
   11054 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11055 		delay(50);
   11056 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11057 		if (i2ccmd & I2CCMD_READY)
   11058 			break;
   11059 	}
   11060 	if ((i2ccmd & I2CCMD_READY) == 0)
   11061 		return -1;
   11062 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11063 		return -1;
   11064 
   11065 	*data = i2ccmd & 0x00ff;
   11066 
   11067 	return 0;
   11068 }
   11069 
   11070 static uint32_t
   11071 wm_sfp_get_media_type(struct wm_softc *sc)
   11072 {
   11073 	uint32_t ctrl_ext;
   11074 	uint8_t val = 0;
   11075 	int timeout = 3;
   11076 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11077 	int rv = -1;
   11078 
   11079 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11080 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11081 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11082 	CSR_WRITE_FLUSH(sc);
   11083 
   11084 	/* Read SFP module data */
   11085 	while (timeout) {
   11086 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11087 		if (rv == 0)
   11088 			break;
   11089 		delay(100*1000); /* XXX too big */
   11090 		timeout--;
   11091 	}
   11092 	if (rv != 0)
   11093 		goto out;
   11094 	switch (val) {
   11095 	case SFF_SFP_ID_SFF:
   11096 		aprint_normal_dev(sc->sc_dev,
   11097 		    "Module/Connector soldered to board\n");
   11098 		break;
   11099 	case SFF_SFP_ID_SFP:
   11100 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11101 		break;
   11102 	case SFF_SFP_ID_UNKNOWN:
   11103 		goto out;
   11104 	default:
   11105 		break;
   11106 	}
   11107 
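        	/* The Ethernet compliance codes tell us the module's media. */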
   11108 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11109 	if (rv != 0) {
   11110 		goto out;
   11111 	}
   11112 
   11113 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11114 		mediatype = WM_MEDIATYPE_SERDES;
  11115 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11116 		sc->sc_flags |= WM_F_SGMII;
   11117 		mediatype = WM_MEDIATYPE_COPPER;
  11118 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11119 		sc->sc_flags |= WM_F_SGMII;
   11120 		mediatype = WM_MEDIATYPE_SERDES;
   11121 	}
   11122 
   11123 out:
   11124 	/* Restore I2C interface setting */
   11125 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11126 
   11127 	return mediatype;
   11128 }
   11129 
   11130 /*
   11131  * NVM related.
   11132  * Microwire, SPI (w/wo EERD) and Flash.
   11133  */
   11134 
   11135 /* Both spi and uwire */
   11136 
   11137 /*
   11138  * wm_eeprom_sendbits:
   11139  *
   11140  *	Send a series of bits to the EEPROM.
   11141  */
   11142 static void
   11143 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11144 {
   11145 	uint32_t reg;
   11146 	int x;
   11147 
   11148 	reg = CSR_READ(sc, WMREG_EECD);
   11149 
   11150 	for (x = nbits; x > 0; x--) {
   11151 		if (bits & (1U << (x - 1)))
   11152 			reg |= EECD_DI;
   11153 		else
   11154 			reg &= ~EECD_DI;
   11155 		CSR_WRITE(sc, WMREG_EECD, reg);
   11156 		CSR_WRITE_FLUSH(sc);
   11157 		delay(2);
   11158 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11159 		CSR_WRITE_FLUSH(sc);
   11160 		delay(2);
   11161 		CSR_WRITE(sc, WMREG_EECD, reg);
   11162 		CSR_WRITE_FLUSH(sc);
   11163 		delay(2);
   11164 	}
   11165 }
   11166 
   11167 /*
   11168  * wm_eeprom_recvbits:
   11169  *
   11170  *	Receive a series of bits from the EEPROM.
   11171  */
   11172 static void
   11173 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11174 {
   11175 	uint32_t reg, val;
   11176 	int x;
   11177 
   11178 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11179 
   11180 	val = 0;
   11181 	for (x = nbits; x > 0; x--) {
   11182 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11183 		CSR_WRITE_FLUSH(sc);
   11184 		delay(2);
   11185 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11186 			val |= (1U << (x - 1));
   11187 		CSR_WRITE(sc, WMREG_EECD, reg);
   11188 		CSR_WRITE_FLUSH(sc);
   11189 		delay(2);
   11190 	}
   11191 	*valp = val;
   11192 }
   11193 
   11194 /* Microwire */
   11195 
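        /*
         * A Microwire read raises chip select, clocks out the 3-bit READ
         * opcode and the word address, then clocks 16 data bits back in
         * and drops chip select again.
         */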
   11196 /*
   11197  * wm_nvm_read_uwire:
   11198  *
   11199  *	Read a word from the EEPROM using the MicroWire protocol.
   11200  */
   11201 static int
   11202 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11203 {
   11204 	uint32_t reg, val;
   11205 	int i;
   11206 
   11207 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11208 		device_xname(sc->sc_dev), __func__));
   11209 
   11210 	for (i = 0; i < wordcnt; i++) {
   11211 		/* Clear SK and DI. */
   11212 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11213 		CSR_WRITE(sc, WMREG_EECD, reg);
   11214 
   11215 		/*
   11216 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11217 		 * and Xen.
   11218 		 *
   11219 		 * We use this workaround only for 82540 because qemu's
   11220 		 * e1000 act as 82540.
   11221 		 */
   11222 		if (sc->sc_type == WM_T_82540) {
   11223 			reg |= EECD_SK;
   11224 			CSR_WRITE(sc, WMREG_EECD, reg);
   11225 			reg &= ~EECD_SK;
   11226 			CSR_WRITE(sc, WMREG_EECD, reg);
   11227 			CSR_WRITE_FLUSH(sc);
   11228 			delay(2);
   11229 		}
   11230 		/* XXX: end of workaround */
   11231 
   11232 		/* Set CHIP SELECT. */
   11233 		reg |= EECD_CS;
   11234 		CSR_WRITE(sc, WMREG_EECD, reg);
   11235 		CSR_WRITE_FLUSH(sc);
   11236 		delay(2);
   11237 
   11238 		/* Shift in the READ command. */
   11239 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11240 
   11241 		/* Shift in address. */
   11242 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11243 
   11244 		/* Shift out the data. */
   11245 		wm_eeprom_recvbits(sc, &val, 16);
   11246 		data[i] = val & 0xffff;
   11247 
   11248 		/* Clear CHIP SELECT. */
   11249 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11250 		CSR_WRITE(sc, WMREG_EECD, reg);
   11251 		CSR_WRITE_FLUSH(sc);
   11252 		delay(2);
   11253 	}
   11254 
   11255 	return 0;
   11256 }
   11257 
   11258 /* SPI */
   11259 
   11260 /*
   11261  * Set SPI and FLASH related information from the EECD register.
   11262  * For 82541 and 82547, the word size is taken from EEPROM.
   11263  */
   11264 static int
   11265 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11266 {
   11267 	int size;
   11268 	uint32_t reg;
   11269 	uint16_t data;
   11270 
   11271 	reg = CSR_READ(sc, WMREG_EECD);
   11272 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11273 
   11274 	/* Read the size of NVM from EECD by default */
   11275 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11276 	switch (sc->sc_type) {
   11277 	case WM_T_82541:
   11278 	case WM_T_82541_2:
   11279 	case WM_T_82547:
   11280 	case WM_T_82547_2:
   11281 		/* Set dummy value to access EEPROM */
   11282 		sc->sc_nvm_wordsize = 64;
   11283 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11284 		reg = data;
   11285 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11286 		if (size == 0)
   11287 			size = 6; /* 64 word size */
   11288 		else
   11289 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11290 		break;
   11291 	case WM_T_80003:
   11292 	case WM_T_82571:
   11293 	case WM_T_82572:
   11294 	case WM_T_82573: /* SPI case */
   11295 	case WM_T_82574: /* SPI case */
   11296 	case WM_T_82583: /* SPI case */
   11297 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11298 		if (size > 14)
   11299 			size = 14;
   11300 		break;
   11301 	case WM_T_82575:
   11302 	case WM_T_82576:
   11303 	case WM_T_82580:
   11304 	case WM_T_I350:
   11305 	case WM_T_I354:
   11306 	case WM_T_I210:
   11307 	case WM_T_I211:
   11308 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11309 		if (size > 15)
   11310 			size = 15;
   11311 		break;
   11312 	default:
   11313 		aprint_error_dev(sc->sc_dev,
   11314 		    "%s: unknown device (%d)\n", __func__, sc->sc_type);
   11315 		return -1;
   11316 		break;
   11317 	}
   11318 
   11319 	sc->sc_nvm_wordsize = 1 << size;
   11320 
   11321 	return 0;
   11322 }
   11323 
   11324 /*
   11325  * wm_nvm_ready_spi:
   11326  *
   11327  *	Wait for a SPI EEPROM to be ready for commands.
   11328  */
   11329 static int
   11330 wm_nvm_ready_spi(struct wm_softc *sc)
   11331 {
   11332 	uint32_t val;
   11333 	int usec;
   11334 
   11335 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11336 		device_xname(sc->sc_dev), __func__));
   11337 
   11338 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11339 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11340 		wm_eeprom_recvbits(sc, &val, 8);
   11341 		if ((val & SPI_SR_RDY) == 0)
   11342 			break;
   11343 	}
   11344 	if (usec >= SPI_MAX_RETRIES) {
   11345 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11346 		return 1;
   11347 	}
   11348 	return 0;
   11349 }
   11350 
   11351 /*
   11352  * wm_nvm_read_spi:
   11353  *
   11354  *	Read a word from the EEPROM using the SPI protocol.
   11355  */
   11356 static int
   11357 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11358 {
   11359 	uint32_t reg, val;
   11360 	int i;
   11361 	uint8_t opc;
   11362 
   11363 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11364 		device_xname(sc->sc_dev), __func__));
   11365 
   11366 	/* Clear SK and CS. */
   11367 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11368 	CSR_WRITE(sc, WMREG_EECD, reg);
   11369 	CSR_WRITE_FLUSH(sc);
   11370 	delay(2);
   11371 
   11372 	if (wm_nvm_ready_spi(sc))
   11373 		return 1;
   11374 
   11375 	/* Toggle CS to flush commands. */
   11376 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11377 	CSR_WRITE_FLUSH(sc);
   11378 	delay(2);
   11379 	CSR_WRITE(sc, WMREG_EECD, reg);
   11380 	CSR_WRITE_FLUSH(sc);
   11381 	delay(2);
   11382 
   11383 	opc = SPI_OPC_READ;
   11384 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11385 		opc |= SPI_OPC_A8;
   11386 
   11387 	wm_eeprom_sendbits(sc, opc, 8);
   11388 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11389 
   11390 	for (i = 0; i < wordcnt; i++) {
   11391 		wm_eeprom_recvbits(sc, &val, 16);
   11392 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11393 	}
   11394 
   11395 	/* Raise CS and clear SK. */
   11396 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11397 	CSR_WRITE(sc, WMREG_EECD, reg);
   11398 	CSR_WRITE_FLUSH(sc);
   11399 	delay(2);
   11400 
   11401 	return 0;
   11402 }
   11403 
   11404 /* Reading via the EERD register */
   11405 
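         /*
          * wm_poll_eerd_eewr_done:
          *
          *	Poll the register selected by 'rw' (EERD or EEWR) until its
          *	DONE bit is set, for at most 100000 * 5us = 500ms.  Return 0
          *	on success, -1 on timeout.
          */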
   11406 static int
   11407 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11408 {
   11409 	uint32_t attempts = 100000;
   11410 	uint32_t i, reg = 0;
   11411 	int32_t done = -1;
   11412 
   11413 	for (i = 0; i < attempts; i++) {
   11414 		reg = CSR_READ(sc, rw);
   11415 
   11416 		if (reg & EERD_DONE) {
   11417 			done = 0;
   11418 			break;
   11419 		}
   11420 		delay(5);
   11421 	}
   11422 
   11423 	return done;
   11424 }
   11425 
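         /*
          * wm_nvm_read_eerd:
          *
          *	Read words from the EEPROM via the EERD register: write the
          *	word address together with the START bit, poll for DONE, then
          *	pick the data out of the same register.
          */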
   11426 static int
   11427 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11428     uint16_t *data)
   11429 {
   11430 	int i, eerd = 0;
   11431 	int error = 0;
   11432 
   11433 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11434 		device_xname(sc->sc_dev), __func__));
   11435 
   11436 	for (i = 0; i < wordcnt; i++) {
   11437 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11438 
   11439 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11440 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11441 		if (error != 0)
   11442 			break;
   11443 
   11444 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11445 	}
   11446 
   11447 	return error;
   11448 }
   11449 
   11450 /* Flash */
   11451 
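         /*
          * wm_nvm_valid_bank_detect_ich8lan:
          *
          *	Figure out which of the two flash banks holds the valid NVM
          *	image: from CTRL_EXT on PCH_SPT, from EECD on ICH8/ICH9 when
          *	the valid bits are set, and otherwise from the signature byte
          *	stored in each bank.
          */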
   11452 static int
   11453 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11454 {
   11455 	uint32_t eecd;
   11456 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11457 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11458 	uint8_t sig_byte = 0;
   11459 
   11460 	switch (sc->sc_type) {
   11461 	case WM_T_PCH_SPT:
   11462 		/*
   11463 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11464 		 * sector valid bits from the NVM.
   11465 		 */
   11466 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11467 		if ((*bank == 0) || (*bank == 1)) {
   11468 			aprint_error_dev(sc->sc_dev,
   11469 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11470 				*bank);
   11471 			return -1;
   11472 		} else {
   11473 			*bank = *bank - 2;
   11474 			return 0;
   11475 		}
   11476 	case WM_T_ICH8:
   11477 	case WM_T_ICH9:
   11478 		eecd = CSR_READ(sc, WMREG_EECD);
   11479 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11480 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11481 			return 0;
   11482 		}
   11483 		/* FALLTHROUGH */
   11484 	default:
   11485 		/* Default to 0 */
   11486 		*bank = 0;
   11487 
   11488 		/* Check bank 0 */
   11489 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11490 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11491 			*bank = 0;
   11492 			return 0;
   11493 		}
   11494 
   11495 		/* Check bank 1 */
   11496 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11497 		    &sig_byte);
   11498 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11499 			*bank = 1;
   11500 			return 0;
   11501 		}
   11502 	}
   11503 
   11504 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11505 		device_xname(sc->sc_dev)));
   11506 	return -1;
   11507 }
   11508 
   11509 /******************************************************************************
   11510  * This function does initial flash setup so that a new read/write/erase cycle
   11511  * can be started.
   11512  *
   11513  * sc - The pointer to the hw structure
   11514  ****************************************************************************/
   11515 static int32_t
   11516 wm_ich8_cycle_init(struct wm_softc *sc)
   11517 {
   11518 	uint16_t hsfsts;
   11519 	int32_t error = 1;
   11520 	int32_t i     = 0;
   11521 
   11522 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11523 
   11524 	/* Check the Flash Descriptor Valid bit in HW status. */
   11525 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11526 		return error;
   11527 	}
   11528 
   11529 	/* Clear FCERR in Hw status by writing 1 */
   11530 	/* Clear DAEL in Hw status by writing a 1 */
   11531 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11532 
   11533 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11534 
   11535 	/*
   11536 	 * Either we should have a hardware SPI cycle-in-progress bit to
   11537 	 * check against before starting a new cycle, or the FDONE bit
   11538 	 * should be changed in the hardware so that it is 1 after a
   11539 	 * hardware reset; it could then tell whether a cycle is in
   11540 	 * progress or has completed.  We should also have a software
   11541 	 * semaphore to guard FDONE or the cycle-in-progress bit so that
   11542 	 * access by two threads is serialized, and so that two threads
   11543 	 * don't start a cycle at the same time.
   11544 	 */
   11545 
   11546 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11547 		/*
   11548 		 * There is no cycle running at present, so we can start a
   11549 		 * cycle
   11550 		 */
   11551 
   11552 		/* Begin by setting Flash Cycle Done. */
   11553 		hsfsts |= HSFSTS_DONE;
   11554 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11555 		error = 0;
   11556 	} else {
   11557 		/*
   11558 		 * Otherwise, poll for some time so the current cycle has a
   11559 		 * chance to end before giving up.
   11560 		 */
   11561 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11562 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11563 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11564 				error = 0;
   11565 				break;
   11566 			}
   11567 			delay(1);
   11568 		}
   11569 		if (error == 0) {
   11570 			/*
   11571 			 * The previous cycle ended before the timeout;
   11572 			 * now set the Flash Cycle Done bit.
   11573 			 */
   11574 			hsfsts |= HSFSTS_DONE;
   11575 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11576 		}
   11577 	}
   11578 	return error;
   11579 }
   11580 
   11581 /******************************************************************************
   11582  * This function starts a flash cycle and waits for its completion
   11583  *
   11584  * sc - The pointer to the hw structure
   11585  ****************************************************************************/
   11586 static int32_t
   11587 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11588 {
   11589 	uint16_t hsflctl;
   11590 	uint16_t hsfsts;
   11591 	int32_t error = 1;
   11592 	uint32_t i = 0;
   11593 
   11594 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11595 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11596 	hsflctl |= HSFCTL_GO;
   11597 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11598 
   11599 	/* Wait till FDONE bit is set to 1 */
   11600 	do {
   11601 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11602 		if (hsfsts & HSFSTS_DONE)
   11603 			break;
   11604 		delay(1);
   11605 		i++;
   11606 	} while (i < timeout);
   11607 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11608 		error = 0;
   11609 
   11610 	return error;
   11611 }
   11612 
   11613 /******************************************************************************
   11614  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11615  *
   11616  * sc - The pointer to the hw structure
   11617  * index - The index of the byte or word to read.
   11618  * size - Size of data to read, 1=byte 2=word, 4=dword
   11619  * data - Pointer to the word to store the value read.
   11620  *****************************************************************************/
   11621 static int32_t
   11622 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11623     uint32_t size, uint32_t *data)
   11624 {
   11625 	uint16_t hsfsts;
   11626 	uint16_t hsflctl;
   11627 	uint32_t flash_linear_address;
   11628 	uint32_t flash_data = 0;
   11629 	int32_t error = 1;
   11630 	int32_t count = 0;
   11631 
   11632 	if (size < 1 || size > 4 || data == NULL ||
   11633 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11634 		return error;
   11635 
   11636 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11637 	    sc->sc_ich8_flash_base;
   11638 
   11639 	do {
   11640 		delay(1);
   11641 		/* Steps */
   11642 		error = wm_ich8_cycle_init(sc);
   11643 		if (error)
   11644 			break;
   11645 
   11646 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11647 		/* The BCOUNT field is the transfer size in bytes, minus 1. */
   11648 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11649 		    & HSFCTL_BCOUNT_MASK;
   11650 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11651 		if (sc->sc_type == WM_T_PCH_SPT) {
   11652 			/*
   11653 			 * In SPT, this register is in LAN memory space, not
   11654 			 * flash, so only 32 bit access is supported.
   11655 			 */
   11656 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11657 			    (uint32_t)hsflctl);
   11658 		} else
   11659 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11660 
   11661 		/*
   11662 		 * Write the last 24 bits of the index into the Flash Linear
   11663 		 * Address field of the Flash Address register.
   11664 		 */
   11665 		/* TODO: maybe check the index against the size of flash */
   11666 
   11667 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11668 
   11669 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11670 
   11671 		/*
   11672 		 * If FCERR is set, clear it and retry the whole sequence
   11673 		 * a few more times; otherwise read the data out of the
   11674 		 * Flash Data0 register, least significant byte first
   11675 		 * (LSB to MSB).
   11676 		 */
   11677 		if (error == 0) {
   11678 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11679 			if (size == 1)
   11680 				*data = (uint8_t)(flash_data & 0x000000FF);
   11681 			else if (size == 2)
   11682 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11683 			else if (size == 4)
   11684 				*data = (uint32_t)flash_data;
   11685 			break;
   11686 		} else {
   11687 			/*
   11688 			 * If we've gotten here, then things are probably
   11689 			 * completely hosed, but if the error condition is
   11690 			 * detected, it won't hurt to give it another try,
   11691 			 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11692 			 */
   11693 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11694 			if (hsfsts & HSFSTS_ERR) {
   11695 				/* Repeat for some time before giving up. */
   11696 				continue;
   11697 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11698 				break;
   11699 		}
   11700 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11701 
   11702 	return error;
   11703 }
   11704 
   11705 /******************************************************************************
   11706  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11707  *
   11708  * sc - pointer to wm_hw structure
   11709  * index - The index of the byte to read.
   11710  * data - Pointer to a byte to store the value read.
   11711  *****************************************************************************/
   11712 static int32_t
   11713 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11714 {
   11715 	int32_t status;
   11716 	uint32_t word = 0;
   11717 
   11718 	status = wm_read_ich8_data(sc, index, 1, &word);
   11719 	if (status == 0)
   11720 		*data = (uint8_t)word;
   11721 	else
   11722 		*data = 0;
   11723 
   11724 	return status;
   11725 }
   11726 
   11727 /******************************************************************************
   11728  * Reads a word from the NVM using the ICH8 flash access registers.
   11729  *
   11730  * sc - pointer to wm_hw structure
   11731  * index - The starting byte index of the word to read.
   11732  * data - Pointer to a word to store the value read.
   11733  *****************************************************************************/
   11734 static int32_t
   11735 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11736 {
   11737 	int32_t status;
   11738 	uint32_t word = 0;
   11739 
   11740 	status = wm_read_ich8_data(sc, index, 2, &word);
   11741 	if (status == 0)
   11742 		*data = (uint16_t)word;
   11743 	else
   11744 		*data = 0;
   11745 
   11746 	return status;
   11747 }
   11748 
   11749 /******************************************************************************
   11750  * Reads a dword from the NVM using the ICH8 flash access registers.
   11751  *
   11752  * sc - pointer to wm_hw structure
   11753  * index - The starting byte index of the dword to read.
   11754  * data - Pointer to a dword to store the value read.
   11755  *****************************************************************************/
   11756 static int32_t
   11757 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11758 {
   11759 	int32_t status;
   11760 
   11761 	status = wm_read_ich8_data(sc, index, 4, data);
   11762 	return status;
   11763 }
   11764 
   11765 /******************************************************************************
   11766  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11767  * register.
   11768  *
   11769  * sc - Struct containing variables accessed by shared code
   11770  * offset - offset of word in the EEPROM to read
   11771  * data - word read from the EEPROM
   11772  * words - number of words to read
   11773  *****************************************************************************/
   11774 static int
   11775 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11776 {
   11777 	int32_t  error = 0;
   11778 	uint32_t flash_bank = 0;
   11779 	uint32_t act_offset = 0;
   11780 	uint32_t bank_offset = 0;
   11781 	uint16_t word = 0;
   11782 	uint16_t i = 0;
   11783 
   11784 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11785 		device_xname(sc->sc_dev), __func__));
   11786 
   11787 	/*
   11788 	 * We need to know which is the valid flash bank.  In the event
   11789 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11790 	 * managing flash_bank.  So it cannot be trusted and needs
   11791 	 * to be updated with each read.
   11792 	 */
   11793 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11794 	if (error) {
   11795 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11796 			device_xname(sc->sc_dev)));
   11797 		flash_bank = 0;
   11798 	}
   11799 
   11800 	/*
   11801 	 * Adjust the offset appropriately if we're on bank 1, accounting
   11802 	 * for the word size.
   11803 	 */
   11804 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11805 
   11806 	error = wm_get_swfwhw_semaphore(sc);
   11807 	if (error) {
   11808 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11809 		    __func__);
   11810 		return error;
   11811 	}
   11812 
   11813 	for (i = 0; i < words; i++) {
   11814 		/* The NVM part needs a byte offset, hence * 2 */
   11815 		act_offset = bank_offset + ((offset + i) * 2);
   11816 		error = wm_read_ich8_word(sc, act_offset, &word);
   11817 		if (error) {
   11818 			aprint_error_dev(sc->sc_dev,
   11819 			    "%s: failed to read NVM\n", __func__);
   11820 			break;
   11821 		}
   11822 		data[i] = word;
   11823 	}
   11824 
   11825 	wm_put_swfwhw_semaphore(sc);
   11826 	return error;
   11827 }
   11828 
   11829 /******************************************************************************
   11830  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11831  * register.
   11832  *
   11833  * sc - Struct containing variables accessed by shared code
   11834  * offset - offset of word in the EEPROM to read
   11835  * data - word read from the EEPROM
   11836  * words - number of words to read
   11837  *****************************************************************************/
   11838 static int
   11839 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11840 {
   11841 	int32_t  error = 0;
   11842 	uint32_t flash_bank = 0;
   11843 	uint32_t act_offset = 0;
   11844 	uint32_t bank_offset = 0;
   11845 	uint32_t dword = 0;
   11846 	uint16_t i = 0;
   11847 
   11848 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11849 		device_xname(sc->sc_dev), __func__));
   11850 
   11851 	/*
   11852 	 * We need to know which is the valid flash bank.  In the event
   11853 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11854 	 * managing flash_bank.  So it cannot be trusted and needs
   11855 	 * to be updated with each read.
   11856 	 */
   11857 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11858 	if (error) {
   11859 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11860 			device_xname(sc->sc_dev)));
   11861 		flash_bank = 0;
   11862 	}
   11863 
   11864 	/*
   11865 	 * Adjust the offset appropriately if we're on bank 1, accounting
   11866 	 * for the word size.
   11867 	 */
   11868 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11869 
   11870 	error = wm_get_swfwhw_semaphore(sc);
   11871 	if (error) {
   11872 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11873 		    __func__);
   11874 		return error;
   11875 	}
   11876 
   11877 	for (i = 0; i < words; i++) {
   11878 		/* The NVM part needs a byte offset, hence * 2 */
   11879 		act_offset = bank_offset + ((offset + i) * 2);
   11880 		/* but we must read dword aligned, so mask ... */
   11881 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11882 		if (error) {
   11883 			aprint_error_dev(sc->sc_dev,
   11884 			    "%s: failed to read NVM\n", __func__);
   11885 			break;
   11886 		}
   11887 		/* ... and pick out low or high word */
   11888 		if ((act_offset & 0x2) == 0)
   11889 			data[i] = (uint16_t)(dword & 0xFFFF);
   11890 		else
   11891 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11892 	}
   11893 
   11894 	wm_put_swfwhw_semaphore(sc);
   11895 	return error;
   11896 }
   11897 
   11898 /* iNVM */
   11899 
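         /*
          * wm_nvm_read_word_invm:
          *
          *	Scan the I210/I211 integrated NVM's dword records for a word
          *	autoload record matching 'address', skipping over CSR autoload
          *	and RSA key structures along the way.
          */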
   11900 static int
   11901 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11902 {
   11903 	int32_t rv = -1;	/* -1 means "not found" */
   11904 	uint32_t invm_dword;
   11905 	uint16_t i;
   11906 	uint8_t record_type, word_address;
   11907 
   11908 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11909 		device_xname(sc->sc_dev), __func__));
   11910 
   11911 	for (i = 0; i < INVM_SIZE; i++) {
   11912 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11913 		/* Get record type */
   11914 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11915 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11916 			break;
   11917 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11918 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11919 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11920 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11921 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11922 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11923 			if (word_address == address) {
   11924 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11925 				rv = 0;
   11926 				break;
   11927 			}
   11928 		}
   11929 	}
   11930 
   11931 	return rv;
   11932 }
   11933 
   11934 static int
   11935 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11936 {
   11937 	int rv = 0;
   11938 	int i;
   11939 
   11940 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11941 		device_xname(sc->sc_dev), __func__));
   11942 
   11943 	for (i = 0; i < words; i++) {
   11944 		switch (offset + i) {
   11945 		case NVM_OFF_MACADDR:
   11946 		case NVM_OFF_MACADDR1:
   11947 		case NVM_OFF_MACADDR2:
   11948 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11949 			if (rv != 0) {
   11950 				data[i] = 0xffff;
   11951 				rv = -1;
   11952 			}
   11953 			break;
   11954 		case NVM_OFF_CFG2:
   11955 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11956 			if (rv != 0) {
   11957 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11958 				rv = 0;
   11959 			}
   11960 			break;
   11961 		case NVM_OFF_CFG4:
   11962 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11963 			if (rv != 0) {
   11964 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11965 				rv = 0;
   11966 			}
   11967 			break;
   11968 		case NVM_OFF_LED_1_CFG:
   11969 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11970 			if (rv != 0) {
   11971 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11972 				rv = 0;
   11973 			}
   11974 			break;
   11975 		case NVM_OFF_LED_0_2_CFG:
   11976 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11977 			if (rv != 0) {
   11978 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11979 				rv = 0;
   11980 			}
   11981 			break;
   11982 		case NVM_OFF_ID_LED_SETTINGS:
   11983 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11984 			if (rv != 0) {
   11985 				*data = ID_LED_RESERVED_FFFF;
   11986 				rv = 0;
   11987 			}
   11988 			break;
   11989 		default:
   11990 			DPRINTF(WM_DEBUG_NVM,
   11991 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11992 			*data = NVM_RESERVED_WORD;
   11993 			break;
   11994 		}
   11995 	}
   11996 
   11997 	return rv;
   11998 }
   11999 
   12000 /* Locking, NVM type detection, checksum validation, version and read */
   12001 
   12002 /*
   12003  * wm_nvm_acquire:
   12004  *
   12005  *	Perform the EEPROM handshake required on some chips.
   12006  */
   12007 static int
   12008 wm_nvm_acquire(struct wm_softc *sc)
   12009 {
   12010 	uint32_t reg;
   12011 	int x;
   12012 	int ret = 0;
   12013 
   12014 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12015 		device_xname(sc->sc_dev), __func__));
   12016 
   12017 	if (sc->sc_type >= WM_T_ICH8) {
   12018 		ret = wm_get_nvm_ich8lan(sc);
   12019 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   12020 		ret = wm_get_swfwhw_semaphore(sc);
   12021 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   12022 		/* This will also do wm_get_swsm_semaphore() if needed */
   12023 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   12024 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12025 		ret = wm_get_swsm_semaphore(sc);
   12026 	}
   12027 
   12028 	if (ret) {
   12029 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   12030 			__func__);
   12031 		return 1;
   12032 	}
   12033 
   12034 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12035 		reg = CSR_READ(sc, WMREG_EECD);
   12036 
   12037 		/* Request EEPROM access. */
   12038 		reg |= EECD_EE_REQ;
   12039 		CSR_WRITE(sc, WMREG_EECD, reg);
   12040 
   12041 		/* ... and wait for it to be granted. */
   12042 		for (x = 0; x < 1000; x++) {
   12043 			reg = CSR_READ(sc, WMREG_EECD);
   12044 			if (reg & EECD_EE_GNT)
   12045 				break;
   12046 			delay(5);
   12047 		}
   12048 		if ((reg & EECD_EE_GNT) == 0) {
   12049 			aprint_error_dev(sc->sc_dev,
   12050 			    "could not acquire EEPROM GNT\n");
   12051 			reg &= ~EECD_EE_REQ;
   12052 			CSR_WRITE(sc, WMREG_EECD, reg);
   12053 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12054 				wm_put_swfwhw_semaphore(sc);
   12055 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12056 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12057 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12058 				wm_put_swsm_semaphore(sc);
   12059 			return 1;
   12060 		}
   12061 	}
   12062 
   12063 	return 0;
   12064 }
   12065 
   12066 /*
   12067  * wm_nvm_release:
   12068  *
   12069  *	Release the EEPROM mutex.
   12070  */
   12071 static void
   12072 wm_nvm_release(struct wm_softc *sc)
   12073 {
   12074 	uint32_t reg;
   12075 
   12076 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12077 		device_xname(sc->sc_dev), __func__));
   12078 
   12079 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12080 		reg = CSR_READ(sc, WMREG_EECD);
   12081 		reg &= ~EECD_EE_REQ;
   12082 		CSR_WRITE(sc, WMREG_EECD, reg);
   12083 	}
   12084 
   12085 	if (sc->sc_type >= WM_T_ICH8) {
   12086 		wm_put_nvm_ich8lan(sc);
   12087 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12088 		wm_put_swfwhw_semaphore(sc);
   12089 	else if (sc->sc_flags & WM_F_LOCK_SWFW)
   12090 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12091 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12092 		wm_put_swsm_semaphore(sc);
   12093 }
   12094 
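         /*
          * wm_nvm_is_onboard_eeprom:
          *
          *	Return 1 if the NVM is an on-board EEPROM.  On the 82573,
          *	82574 and 82583, EECD bits 15 and 16 both set means the
          *	device uses flash instead.
          */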
   12095 static int
   12096 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12097 {
   12098 	uint32_t eecd = 0;
   12099 
   12100 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12101 	    || sc->sc_type == WM_T_82583) {
   12102 		eecd = CSR_READ(sc, WMREG_EECD);
   12103 
   12104 		/* Isolate bits 15 & 16 */
   12105 		eecd = ((eecd >> 15) & 0x03);
   12106 
   12107 		/* If both bits are set, device is Flash type */
   12108 		if (eecd == 0x03)
   12109 			return 0;
   12110 	}
   12111 	return 1;
   12112 }
   12113 
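         /*
          * wm_nvm_get_flash_presence_i210:
          *
          *	Return 1 if an external flash is attached (EEC_FLASH_DETECTED),
          *	0 if the device runs from iNVM only.
          */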
   12114 static int
   12115 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12116 {
   12117 	uint32_t eec;
   12118 
   12119 	eec = CSR_READ(sc, WMREG_EEC);
   12120 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12121 		return 1;
   12122 
   12123 	return 0;
   12124 }
   12125 
   12126 /*
   12127  * wm_nvm_validate_checksum
   12128  *
   12129  * The sum of the first 64 16-bit words must equal NVM_CHECKSUM.
   12130  */
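         /*
          * For example (hypothetical image): if words 0x00-0x3e sum to
          * 0xb8b8, then the checksum word at offset 0x3f must be 0x0202
          * so that the overall sum is NVM_CHECKSUM (0xbaba).
          */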
   12131 static int
   12132 wm_nvm_validate_checksum(struct wm_softc *sc)
   12133 {
   12134 	uint16_t checksum;
   12135 	uint16_t eeprom_data;
   12136 #ifdef WM_DEBUG
   12137 	uint16_t csum_wordaddr, valid_checksum;
   12138 #endif
   12139 	int i;
   12140 
   12141 	checksum = 0;
   12142 
   12143 	/* Don't check for I211 */
   12144 	if (sc->sc_type == WM_T_I211)
   12145 		return 0;
   12146 
   12147 #ifdef WM_DEBUG
   12148 	if (sc->sc_type == WM_T_PCH_LPT) {
   12149 		csum_wordaddr = NVM_OFF_COMPAT;
   12150 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12151 	} else {
   12152 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12153 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12154 	}
   12155 
   12156 	/* Dump EEPROM image for debug */
   12157 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12158 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12159 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12160 		/* XXX PCH_SPT? */
   12161 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12162 		if ((eeprom_data & valid_checksum) == 0) {
   12163 			DPRINTF(WM_DEBUG_NVM,
   12164 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12165 				device_xname(sc->sc_dev), eeprom_data,
   12166 				    valid_checksum));
   12167 		}
   12168 	}
   12169 
   12170 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12171 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12172 		for (i = 0; i < NVM_SIZE; i++) {
   12173 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12174 				printf("XXXX ");
   12175 			else
   12176 				printf("%04hx ", eeprom_data);
   12177 			if (i % 8 == 7)
   12178 				printf("\n");
   12179 		}
   12180 	}
   12181 
   12182 #endif /* WM_DEBUG */
   12183 
   12184 	for (i = 0; i < NVM_SIZE; i++) {
   12185 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12186 			return 1;
   12187 		checksum += eeprom_data;
   12188 	}
   12189 
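         	/*
         	 * A checksum mismatch is only reported (under WM_DEBUG); it
         	 * is not treated as fatal.
         	 */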
   12190 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12191 #ifdef WM_DEBUG
   12192 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12193 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12194 #endif
   12195 	}
   12196 
   12197 	return 0;
   12198 }
   12199 
   12200 static void
   12201 wm_nvm_version_invm(struct wm_softc *sc)
   12202 {
   12203 	uint32_t dword;
   12204 
   12205 	/*
   12206 	 * Linux's code to decode the version is very strange, so we
   12207 	 * don't follow that algorithm and just use word 61 as the
   12208 	 * document describes.  Perhaps it's not perfect, though...
   12209 	 *
   12210 	 * Example:
   12211 	 *
   12212 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12213 	 */
   12214 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12215 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12216 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12217 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12218 }
   12219 
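         /*
          * wm_nvm_version:
          *
          *	Decode the NVM image version (and, where present, the build
          *	number, option ROM version and image unique ID) and print it
          *	via aprint_verbose().
          */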
   12220 static void
   12221 wm_nvm_version(struct wm_softc *sc)
   12222 {
   12223 	uint16_t major, minor, build, patch;
   12224 	uint16_t uid0, uid1;
   12225 	uint16_t nvm_data;
   12226 	uint16_t off;
   12227 	bool check_version = false;
   12228 	bool check_optionrom = false;
   12229 	bool have_build = false;
   12230 	bool have_uid = true;
   12231 
   12232 	/*
   12233 	 * Version format:
   12234 	 *
   12235 	 * XYYZ
   12236 	 * X0YZ
   12237 	 * X0YY
   12238 	 *
   12239 	 * Example:
   12240 	 *
   12241 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12242 	 *	82571	0x50a6	5.10.6?
   12243 	 *	82572	0x506a	5.6.10?
   12244 	 *	82572EI	0x5069	5.6.9?
   12245 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12246 	 *		0x2013	2.1.3?
   12247 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12248 	 */
   12249 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12250 	switch (sc->sc_type) {
   12251 	case WM_T_82571:
   12252 	case WM_T_82572:
   12253 	case WM_T_82574:
   12254 	case WM_T_82583:
   12255 		check_version = true;
   12256 		check_optionrom = true;
   12257 		have_build = true;
   12258 		break;
   12259 	case WM_T_82575:
   12260 	case WM_T_82576:
   12261 	case WM_T_82580:
   12262 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12263 			check_version = true;
   12264 		break;
   12265 	case WM_T_I211:
   12266 		wm_nvm_version_invm(sc);
   12267 		have_uid = false;
   12268 		goto printver;
   12269 	case WM_T_I210:
   12270 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12271 			wm_nvm_version_invm(sc);
   12272 			have_uid = false;
   12273 			goto printver;
   12274 		}
   12275 		/* FALLTHROUGH */
   12276 	case WM_T_I350:
   12277 	case WM_T_I354:
   12278 		check_version = true;
   12279 		check_optionrom = true;
   12280 		break;
   12281 	default:
   12282 		return;
   12283 	}
   12284 	if (check_version) {
   12285 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12286 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12287 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12288 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12289 			build = nvm_data & NVM_BUILD_MASK;
   12290 			have_build = true;
   12291 		} else
   12292 			minor = nvm_data & 0x00ff;
   12293 
   12294 		/* Convert the BCD-encoded minor number to decimal. */
   12295 		minor = (minor / 16) * 10 + (minor % 16);
   12296 		sc->sc_nvm_ver_major = major;
   12297 		sc->sc_nvm_ver_minor = minor;
   12298 
   12299 printver:
   12300 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12301 		    sc->sc_nvm_ver_minor);
   12302 		if (have_build) {
   12303 			sc->sc_nvm_ver_build = build;
   12304 			aprint_verbose(".%d", build);
   12305 		}
   12306 	}
   12307 	if (check_optionrom) {
   12308 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12309 		/* Option ROM Version */
   12310 		if ((off != 0x0000) && (off != 0xffff)) {
   12311 			off += NVM_COMBO_VER_OFF;
   12312 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12313 			wm_nvm_read(sc, off, 1, &uid0);
   12314 			if ((uid0 != 0) && (uid0 != 0xffff)
   12315 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12316 				/* 16bits */
   12317 				major = uid0 >> 8;
   12318 				build = (uid0 << 8) | (uid1 >> 8);
   12319 				patch = uid1 & 0x00ff;
   12320 				aprint_verbose(", option ROM Version %d.%d.%d",
   12321 				    major, build, patch);
   12322 			}
   12323 		}
   12324 	}
   12325 
   12326 	if (have_uid) {
   12327 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12328 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12329 	}
   12330 }
   12331 
   12332 /*
   12333  * wm_nvm_read:
   12334  *
   12335  *	Read data from the serial EEPROM.
   12336  */
   12337 static int
   12338 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12339 {
   12340 	int rv;
   12341 
   12342 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12343 		device_xname(sc->sc_dev), __func__));
   12344 
   12345 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12346 		return 1;
   12347 
   12348 	if (wm_nvm_acquire(sc))
   12349 		return 1;
   12350 
   12351 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12352 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12353 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12354 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12355 	else if (sc->sc_type == WM_T_PCH_SPT)
   12356 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12357 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12358 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12359 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12360 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12361 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12362 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12363 	else
   12364 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12365 
   12366 	wm_nvm_release(sc);
   12367 	return rv;
   12368 }
   12369 
   12370 /*
   12371  * Hardware semaphores.
   12372  * Very complex...
   12373  */
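         /*
          * Several locking mechanisms appear below:
          *  - SWSM (SMBI/SWESMBI): software/firmware semaphore register.
          *  - SW_FW_SYNC: per-resource software/firmware lock bits.
          *  - EXTCNFCTR MDIO software ownership: 82573 family and ICH/PCH.
          *  - sc_ich_phymtx/sc_ich_nvmmtx: driver mutexes for ICH8 and newer.
          */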
   12374 
   12375 static int
   12376 wm_get_null(struct wm_softc *sc)
   12377 {
   12378 
   12379 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12380 		device_xname(sc->sc_dev), __func__));
   12381 	return 0;
   12382 }
   12383 
   12384 static void
   12385 wm_put_null(struct wm_softc *sc)
   12386 {
   12387 
   12388 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12389 		device_xname(sc->sc_dev), __func__));
   12390 	return;
   12391 }
   12392 
   12393 /*
   12394  * Get hardware semaphore.
   12395  * Same as e1000_get_hw_semaphore_generic()
   12396  */
   12397 static int
   12398 wm_get_swsm_semaphore(struct wm_softc *sc)
   12399 {
   12400 	int32_t timeout;
   12401 	uint32_t swsm;
   12402 
   12403 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12404 		device_xname(sc->sc_dev), __func__));
   12405 	KASSERT(sc->sc_nvm_wordsize > 0);
   12406 
   12407 	/* Get the SW semaphore. */
   12408 	timeout = sc->sc_nvm_wordsize + 1;
   12409 	while (timeout) {
   12410 		swsm = CSR_READ(sc, WMREG_SWSM);
   12411 
   12412 		if ((swsm & SWSM_SMBI) == 0)
   12413 			break;
   12414 
   12415 		delay(50);
   12416 		timeout--;
   12417 	}
   12418 
   12419 	if (timeout == 0) {
   12420 		aprint_error_dev(sc->sc_dev,
   12421 		    "could not acquire SWSM SMBI\n");
   12422 		return 1;
   12423 	}
   12424 
   12425 	/* Get the FW semaphore. */
   12426 	timeout = sc->sc_nvm_wordsize + 1;
   12427 	while (timeout) {
   12428 		swsm = CSR_READ(sc, WMREG_SWSM);
   12429 		swsm |= SWSM_SWESMBI;
   12430 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12431 		/* If we managed to set the bit we got the semaphore. */
   12432 		swsm = CSR_READ(sc, WMREG_SWSM);
   12433 		if (swsm & SWSM_SWESMBI)
   12434 			break;
   12435 
   12436 		delay(50);
   12437 		timeout--;
   12438 	}
   12439 
   12440 	if (timeout == 0) {
   12441 		aprint_error_dev(sc->sc_dev,
   12442 		    "could not acquire SWSM SWESMBI\n");
   12443 		/* Release semaphores */
   12444 		wm_put_swsm_semaphore(sc);
   12445 		return 1;
   12446 	}
   12447 	return 0;
   12448 }
   12449 
   12450 /*
   12451  * Put hardware semaphore.
   12452  * Same as e1000_put_hw_semaphore_generic()
   12453  */
   12454 static void
   12455 wm_put_swsm_semaphore(struct wm_softc *sc)
   12456 {
   12457 	uint32_t swsm;
   12458 
   12459 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12460 		device_xname(sc->sc_dev), __func__));
   12461 
   12462 	swsm = CSR_READ(sc, WMREG_SWSM);
   12463 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12464 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12465 }
   12466 
   12467 /*
   12468  * Get SW/FW semaphore.
   12469  * Same as e1000_acquire_swfw_sync_82575().
   12470  */
   12471 static int
   12472 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12473 {
   12474 	uint32_t swfw_sync;
   12475 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12476 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12477 	int timeout;
   12478 
   12479 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12480 		device_xname(sc->sc_dev), __func__));
   12481 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12482 
   12483 	for (timeout = 0; timeout < 200; timeout++) {
   12484 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12485 			if (wm_get_swsm_semaphore(sc)) {
   12486 				aprint_error_dev(sc->sc_dev,
   12487 				    "%s: failed to get semaphore\n",
   12488 				    __func__);
   12489 				return 1;
   12490 			}
   12491 		}
   12492 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12493 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12494 			swfw_sync |= swmask;
   12495 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12496 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12497 				wm_put_swsm_semaphore(sc);
   12498 			return 0;
   12499 		}
   12500 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12501 			wm_put_swsm_semaphore(sc);
   12502 		delay(5000);
   12503 	}
   12504 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12505 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12506 	return 1;
   12507 }
   12508 
   12509 static void
   12510 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12511 {
   12512 	uint32_t swfw_sync;
   12513 
   12514 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12515 		device_xname(sc->sc_dev), __func__));
   12516 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12517 
   12518 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12519 		while (wm_get_swsm_semaphore(sc) != 0)
   12520 			continue;
   12521 	}
   12522 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12523 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12524 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12525 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12526 		wm_put_swsm_semaphore(sc);
   12527 }
   12528 
   12529 static int
   12530 wm_get_phy_82575(struct wm_softc *sc)
   12531 {
   12532 
   12533 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12534 		device_xname(sc->sc_dev), __func__));
   12535 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12536 }
   12537 
   12538 static void
   12539 wm_put_phy_82575(struct wm_softc *sc)
   12540 {
   12541 
   12542 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12543 		device_xname(sc->sc_dev), __func__));
   12544 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12545 }
   12546 
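         /*
          * Get the software/firmware/hardware semaphore by setting the MDIO
          * software ownership bit in EXTCNFCTR.  The PHY mutex guards both
          * PHY and NVM access here.
          */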
   12547 static int
   12548 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12549 {
   12550 	uint32_t ext_ctrl;
   12551 	int timeout;
   12552 
   12553 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12554 		device_xname(sc->sc_dev), __func__));
   12555 
   12556 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12557 	for (timeout = 0; timeout < 200; timeout++) {
   12558 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12559 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12560 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12561 
   12562 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12563 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12564 			return 0;
   12565 		delay(5000);
   12566 	}
   12567 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12568 	    device_xname(sc->sc_dev), ext_ctrl);
   12569 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12570 	return 1;
   12571 }
   12572 
   12573 static void
   12574 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12575 {
   12576 	uint32_t ext_ctrl;
   12577 
   12578 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12579 		device_xname(sc->sc_dev), __func__));
   12580 
   12581 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12582 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12583 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12584 
   12585 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12586 }
   12587 
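         /*
          * Get the ICH8 software flag: wait for any current owner to release
          * the MDIO software ownership bit in EXTCNFCTR, then set it and
          * verify that the write stuck.
          */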
   12588 static int
   12589 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12590 {
   12591 	uint32_t ext_ctrl;
   12592 	int timeout;
   12593 
   12594 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12595 		device_xname(sc->sc_dev), __func__));
   12596 	mutex_enter(sc->sc_ich_phymtx);
   12597 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12598 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12599 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12600 			break;
   12601 		delay(1000);
   12602 	}
   12603 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12604 		printf("%s: SW has already locked the resource\n",
   12605 		    device_xname(sc->sc_dev));
   12606 		goto out;
   12607 	}
   12608 
   12609 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12610 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12611 	for (timeout = 0; timeout < 1000; timeout++) {
   12612 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12613 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12614 			break;
   12615 		delay(1000);
   12616 	}
   12617 	if (timeout >= 1000) {
   12618 		printf("%s: failed to acquire semaphore\n",
   12619 		    device_xname(sc->sc_dev));
   12620 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12621 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12622 		goto out;
   12623 	}
   12624 	return 0;
   12625 
   12626 out:
   12627 	mutex_exit(sc->sc_ich_phymtx);
   12628 	return 1;
   12629 }
   12630 
   12631 static void
   12632 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12633 {
   12634 	uint32_t ext_ctrl;
   12635 
   12636 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12637 		device_xname(sc->sc_dev), __func__));
   12638 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12639 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12640 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12641 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12642 	} else {
   12643 		printf("%s: Semaphore unexpectedly released\n",
   12644 		    device_xname(sc->sc_dev));
   12645 	}
   12646 
   12647 	mutex_exit(sc->sc_ich_phymtx);
   12648 }
   12649 
   12650 static int
   12651 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12652 {
   12653 
   12654 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12655 		device_xname(sc->sc_dev), __func__));
   12656 	mutex_enter(sc->sc_ich_nvmmtx);
   12657 
   12658 	return 0;
   12659 }
   12660 
   12661 static void
   12662 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12663 {
   12664 
   12665 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12666 		device_xname(sc->sc_dev), __func__));
   12667 	mutex_exit(sc->sc_ich_nvmmtx);
   12668 }
   12669 
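         /*
          * Get the 82573 family's hardware semaphore (the MDIO software
          * ownership bit in EXTCNFCTR), retrying every 2ms for up to
          * WM_MDIO_OWNERSHIP_TIMEOUT attempts.
          */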
   12670 static int
   12671 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12672 {
   12673 	int i = 0;
   12674 	uint32_t reg;
   12675 
   12676 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12677 		device_xname(sc->sc_dev), __func__));
   12678 
   12679 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12680 	do {
   12681 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12682 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12683 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12684 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12685 			break;
   12686 		delay(2*1000);
   12687 		i++;
   12688 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12689 
   12690 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12691 		wm_put_hw_semaphore_82573(sc);
   12692 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12693 		    device_xname(sc->sc_dev));
   12694 		return -1;
   12695 	}
   12696 
   12697 	return 0;
   12698 }
   12699 
   12700 static void
   12701 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12702 {
   12703 	uint32_t reg;
   12704 
   12705 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12706 		device_xname(sc->sc_dev), __func__));
   12707 
   12708 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12709 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12710 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12711 }
   12712 
   12713 /*
   12714  * Management mode and power management related subroutines.
   12715  * BMC, AMT, suspend/resume and EEE.
   12716  */
   12717 
   12718 #ifdef WM_WOL
   12719 static int
   12720 wm_check_mng_mode(struct wm_softc *sc)
   12721 {
   12722 	int rv;
   12723 
   12724 	switch (sc->sc_type) {
   12725 	case WM_T_ICH8:
   12726 	case WM_T_ICH9:
   12727 	case WM_T_ICH10:
   12728 	case WM_T_PCH:
   12729 	case WM_T_PCH2:
   12730 	case WM_T_PCH_LPT:
   12731 	case WM_T_PCH_SPT:
   12732 		rv = wm_check_mng_mode_ich8lan(sc);
   12733 		break;
   12734 	case WM_T_82574:
   12735 	case WM_T_82583:
   12736 		rv = wm_check_mng_mode_82574(sc);
   12737 		break;
   12738 	case WM_T_82571:
   12739 	case WM_T_82572:
   12740 	case WM_T_82573:
   12741 	case WM_T_80003:
   12742 		rv = wm_check_mng_mode_generic(sc);
   12743 		break;
   12744 	default:
   12745 		/* nothing to do */
   12746 		rv = 0;
   12747 		break;
   12748 	}
   12749 
   12750 	return rv;
   12751 }
   12752 
   12753 static int
   12754 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12755 {
   12756 	uint32_t fwsm;
   12757 
   12758 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12759 
   12760 	if (((fwsm & FWSM_FW_VALID) != 0)
   12761 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12762 		return 1;
   12763 
   12764 	return 0;
   12765 }
   12766 
   12767 static int
   12768 wm_check_mng_mode_82574(struct wm_softc *sc)
   12769 {
   12770 	uint16_t data;
   12771 
   12772 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12773 
   12774 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12775 		return 1;
   12776 
   12777 	return 0;
   12778 }
   12779 
   12780 static int
   12781 wm_check_mng_mode_generic(struct wm_softc *sc)
   12782 {
   12783 	uint32_t fwsm;
   12784 
   12785 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12786 
   12787 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12788 		return 1;
   12789 
   12790 	return 0;
   12791 }
   12792 #endif /* WM_WOL */
   12793 
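         /*
          * Return 1 if management pass-through (firmware reception of TCO
          * management packets) is enabled, based on the MANC, FWSM, FACTPS
          * and NVM CFG2 settings.
          */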
   12794 static int
   12795 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12796 {
   12797 	uint32_t manc, fwsm, factps;
   12798 
   12799 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12800 		return 0;
   12801 
   12802 	manc = CSR_READ(sc, WMREG_MANC);
   12803 
   12804 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12805 		device_xname(sc->sc_dev), manc));
   12806 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12807 		return 0;
   12808 
   12809 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12810 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12811 		factps = CSR_READ(sc, WMREG_FACTPS);
   12812 		if (((factps & FACTPS_MNGCG) == 0)
   12813 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12814 			return 1;
   12815 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   12816 		uint16_t data;
   12817 
   12818 		factps = CSR_READ(sc, WMREG_FACTPS);
   12819 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12820 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12821 			device_xname(sc->sc_dev), factps, data));
   12822 		if (((factps & FACTPS_MNGCG) == 0)
   12823 		    && ((data & NVM_CFG2_MNGM_MASK)
   12824 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12825 			return 1;
   12826 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12827 	    && ((manc & MANC_ASF_EN) == 0))
   12828 		return 1;
   12829 
   12830 	return 0;
   12831 }
   12832 
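         /*
          * Check whether the firmware is blocking PHY resets: FWSM_RSPCIPHY
          * on ICH/PCH parts, MANC_BLK_PHY_RST_ON_IDE on 8257x/80003 parts.
          */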
   12833 static bool
   12834 wm_phy_resetisblocked(struct wm_softc *sc)
   12835 {
   12836 	bool blocked = false;
   12837 	uint32_t reg;
   12838 	int i = 0;
   12839 
   12840 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12841 		device_xname(sc->sc_dev), __func__));
   12842 
   12843 	switch (sc->sc_type) {
   12844 	case WM_T_ICH8:
   12845 	case WM_T_ICH9:
   12846 	case WM_T_ICH10:
   12847 	case WM_T_PCH:
   12848 	case WM_T_PCH2:
   12849 	case WM_T_PCH_LPT:
   12850 	case WM_T_PCH_SPT:
   12851 		do {
   12852 			reg = CSR_READ(sc, WMREG_FWSM);
   12853 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12854 				blocked = true;
   12855 				delay(10*1000);
   12856 				continue;
   12857 			}
   12858 			blocked = false;
   12859 		} while (blocked && (i++ < 30));
   12860 		return blocked;
   12861 		break;
   12862 	case WM_T_82571:
   12863 	case WM_T_82572:
   12864 	case WM_T_82573:
   12865 	case WM_T_82574:
   12866 	case WM_T_82583:
   12867 	case WM_T_80003:
   12868 		reg = CSR_READ(sc, WMREG_MANC);
   12869 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12870 			return true;
   12871 		else
   12872 			return false;
   12873 		break;
   12874 	default:
   12875 		/* no problem */
   12876 		break;
   12877 	}
   12878 
   12879 	return false;
   12880 }
   12881 
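         /*
          * Tell the firmware that the driver has taken over: via SWSM_DRV_LOAD
          * on the 82573, via CTRL_EXT_DRV_LOAD on other 82571-and-newer parts.
          */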
   12882 static void
   12883 wm_get_hw_control(struct wm_softc *sc)
   12884 {
   12885 	uint32_t reg;
   12886 
   12887 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12888 		device_xname(sc->sc_dev), __func__));
   12889 
   12890 	if (sc->sc_type == WM_T_82573) {
   12891 		reg = CSR_READ(sc, WMREG_SWSM);
   12892 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12893 	} else if (sc->sc_type >= WM_T_82571) {
   12894 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12895 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12896 	}
   12897 }
   12898 
   12899 static void
   12900 wm_release_hw_control(struct wm_softc *sc)
   12901 {
   12902 	uint32_t reg;
   12903 
   12904 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12905 		device_xname(sc->sc_dev), __func__));
   12906 
   12907 	if (sc->sc_type == WM_T_82573) {
   12908 		reg = CSR_READ(sc, WMREG_SWSM);
   12909 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12910 	} else if (sc->sc_type >= WM_T_82571) {
   12911 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12912 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12913 	}
   12914 }
   12915 
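         /*
          * Gate or ungate automatic PHY configuration by hardware; only
          * meaningful on PCH2 (82579) and newer.
          */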
   12916 static void
   12917 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12918 {
   12919 	uint32_t reg;
   12920 
   12921 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12922 		device_xname(sc->sc_dev), __func__));
   12923 
   12924 	if (sc->sc_type < WM_T_PCH2)
   12925 		return;
   12926 
   12927 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12928 
   12929 	if (gate)
   12930 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12931 	else
   12932 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12933 
   12934 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12935 }
   12936 
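         /*
          * Bring the PHY out of SMBus mode so that it is accessible over
          * MDIO again, clearing CTRL_EXT_FORCE_SMBUS and toggling LANPHYPC
          * as needed (PCH and newer).
          */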
   12937 static void
   12938 wm_smbustopci(struct wm_softc *sc)
   12939 {
   12940 	uint32_t fwsm, reg;
   12941 	int rv = 0;
   12942 
   12943 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12944 		device_xname(sc->sc_dev), __func__));
   12945 
   12946 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12947 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12948 
   12949 	/* Disable ULP */
   12950 	wm_ulp_disable(sc);
   12951 
   12952 	/* Acquire PHY semaphore */
   12953 	sc->phy.acquire(sc);
   12954 
   12955 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12956 	switch (sc->sc_type) {
   12957 	case WM_T_PCH_LPT:
   12958 	case WM_T_PCH_SPT:
   12959 		if (wm_phy_is_accessible_pchlan(sc))
   12960 			break;
   12961 
   12962 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12963 		reg |= CTRL_EXT_FORCE_SMBUS;
   12964 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12965 #if 0
   12966 		/* XXX Isn't this required??? */
   12967 		CSR_WRITE_FLUSH(sc);
   12968 #endif
   12969 		delay(50 * 1000);
   12970 		/* FALLTHROUGH */
   12971 	case WM_T_PCH2:
   12972 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12973 			break;
   12974 		/* FALLTHROUGH */
   12975 	case WM_T_PCH:
   12976 		if (sc->sc_type == WM_T_PCH)
   12977 			if ((fwsm & FWSM_FW_VALID) != 0)
   12978 				break;
   12979 
   12980 		if (wm_phy_resetisblocked(sc) == true) {
   12981 			printf("XXX reset is blocked(3)\n");
   12982 			break;
   12983 		}
   12984 
   12985 		wm_toggle_lanphypc_pch_lpt(sc);
   12986 
   12987 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12988 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12989 				break;
   12990 
   12991 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12992 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12993 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12994 
   12995 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12996 				break;
   12997 			rv = -1;
   12998 		}
   12999 		break;
   13000 	default:
   13001 		break;
   13002 	}
   13003 
   13004 	/* Release semaphore */
   13005 	sc->phy.release(sc);
   13006 
   13007 	if (rv == 0) {
   13008 		if (wm_phy_resetisblocked(sc)) {
   13009 			printf("XXX reset is blocked(4)\n");
   13010 			goto out;
   13011 		}
   13012 		wm_reset_phy(sc);
   13013 		if (wm_phy_resetisblocked(sc))
   13014 			printf("XXX reset is blocked(5)\n");
   13015 	}
   13016 
   13017 out:
   13018 	/*
   13019 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13020 	 */
   13021 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13022 		delay(10*1000);
   13023 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13024 	}
   13025 }
   13026 
   13027 static void
   13028 wm_init_manageability(struct wm_softc *sc)
   13029 {
   13030 
   13031 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13032 		device_xname(sc->sc_dev), __func__));
   13033 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13034 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13035 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13036 
   13037 		/* Disable hardware interception of ARP */
   13038 		manc &= ~MANC_ARP_EN;
   13039 
   13040 		/* Enable receiving management packets to the host */
   13041 		if (sc->sc_type >= WM_T_82571) {
   13042 			manc |= MANC_EN_MNG2HOST;
    13043 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13044 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13045 		}
   13046 
   13047 		CSR_WRITE(sc, WMREG_MANC, manc);
   13048 	}
   13049 }
   13050 
   13051 static void
   13052 wm_release_manageability(struct wm_softc *sc)
   13053 {
   13054 
   13055 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13056 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13057 
   13058 		manc |= MANC_ARP_EN;
   13059 		if (sc->sc_type >= WM_T_82571)
   13060 			manc &= ~MANC_EN_MNG2HOST;
   13061 
   13062 		CSR_WRITE(sc, WMREG_MANC, manc);
   13063 	}
   13064 }
   13065 
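          /*
           * Determine which wakeup-related capabilities the device has:
           * AMT, ASF firmware presence, a valid ARC subsystem and
           * manageability pass-through.
           */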
   13066 static void
   13067 wm_get_wakeup(struct wm_softc *sc)
   13068 {
   13069 
   13070 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13071 	switch (sc->sc_type) {
   13072 	case WM_T_82573:
   13073 	case WM_T_82583:
   13074 		sc->sc_flags |= WM_F_HAS_AMT;
   13075 		/* FALLTHROUGH */
   13076 	case WM_T_80003:
   13077 	case WM_T_82575:
   13078 	case WM_T_82576:
   13079 	case WM_T_82580:
   13080 	case WM_T_I350:
   13081 	case WM_T_I354:
   13082 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13083 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13084 		/* FALLTHROUGH */
   13085 	case WM_T_82541:
   13086 	case WM_T_82541_2:
   13087 	case WM_T_82547:
   13088 	case WM_T_82547_2:
   13089 	case WM_T_82571:
   13090 	case WM_T_82572:
   13091 	case WM_T_82574:
   13092 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13093 		break;
   13094 	case WM_T_ICH8:
   13095 	case WM_T_ICH9:
   13096 	case WM_T_ICH10:
   13097 	case WM_T_PCH:
   13098 	case WM_T_PCH2:
   13099 	case WM_T_PCH_LPT:
   13100 	case WM_T_PCH_SPT:
   13101 		sc->sc_flags |= WM_F_HAS_AMT;
   13102 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13103 		break;
   13104 	default:
   13105 		break;
   13106 	}
   13107 
   13108 	/* 1: HAS_MANAGE */
   13109 	if (wm_enable_mng_pass_thru(sc) != 0)
   13110 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13111 
    13112 	/*
    13113 	 * Note that the WOL flags are set after the EEPROM-related reset
    13114 	 * code has run.
    13115 	 */
   13116 }
   13117 
    13118 /*
    13119  * Unconfigure Ultra Low Power (ULP) mode.
    13120  * Only for PCH_LPT and newer, excluding some I217/I218 variants (see below).
    13121  */
   13122 static void
   13123 wm_ulp_disable(struct wm_softc *sc)
   13124 {
   13125 	uint32_t reg;
   13126 	int i = 0;
   13127 
   13128 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13129 		device_xname(sc->sc_dev), __func__));
    13130 	/* Exclude old devices and variants without ULP support */
   13131 	if ((sc->sc_type < WM_T_PCH_LPT)
   13132 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13133 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13134 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13135 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13136 		return;
   13137 
   13138 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13139 		/* Request ME un-configure ULP mode in the PHY */
   13140 		reg = CSR_READ(sc, WMREG_H2ME);
   13141 		reg &= ~H2ME_ULP;
   13142 		reg |= H2ME_ENFORCE_SETTINGS;
   13143 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13144 
   13145 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13146 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13147 			if (i++ == 30) {
   13148 				printf("%s timed out\n", __func__);
   13149 				return;
   13150 			}
   13151 			delay(10 * 1000);
   13152 		}
   13153 		reg = CSR_READ(sc, WMREG_H2ME);
   13154 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13155 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13156 
   13157 		return;
   13158 	}
   13159 
   13160 	/* Acquire semaphore */
   13161 	sc->phy.acquire(sc);
   13162 
   13163 	/* Toggle LANPHYPC */
   13164 	wm_toggle_lanphypc_pch_lpt(sc);
   13165 
   13166 	/* Unforce SMBus mode in PHY */
   13167 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13168 	if (reg == 0x0000 || reg == 0xffff) {
   13169 		uint32_t reg2;
   13170 
   13171 		printf("%s: Force SMBus first.\n", __func__);
   13172 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13173 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13174 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13175 		delay(50 * 1000);
   13176 
   13177 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13178 	}
   13179 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13180 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13181 
   13182 	/* Unforce SMBus mode in MAC */
   13183 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13184 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13185 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13186 
   13187 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13188 	reg |= HV_PM_CTRL_K1_ENA;
   13189 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13190 
   13191 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13192 	reg &= ~(I218_ULP_CONFIG1_IND
   13193 	    | I218_ULP_CONFIG1_STICKY_ULP
   13194 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13195 	    | I218_ULP_CONFIG1_WOL_HOST
   13196 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13197 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13198 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13199 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13200 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13201 	reg |= I218_ULP_CONFIG1_START;
   13202 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13203 
   13204 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13205 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13206 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13207 
   13208 	/* Release semaphore */
   13209 	sc->phy.release(sc);
   13210 	wm_gmii_reset(sc);
   13211 	delay(50 * 1000);
   13212 }
   13213 
   13214 /* WOL in the newer chipset interfaces (pchlan) */
   13215 static void
   13216 wm_enable_phy_wakeup(struct wm_softc *sc)
   13217 {
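          	/* XXX Not implemented yet; the intended steps are sketched below. */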
   13218 #if 0
   13219 	uint16_t preg;
   13220 
   13221 	/* Copy MAC RARs to PHY RARs */
   13222 
   13223 	/* Copy MAC MTA to PHY MTA */
   13224 
   13225 	/* Configure PHY Rx Control register */
   13226 
   13227 	/* Enable PHY wakeup in MAC register */
   13228 
   13229 	/* Configure and enable PHY wakeup in PHY registers */
   13230 
   13231 	/* Activate PHY wakeup */
   13232 
   13233 	/* XXX */
   13234 #endif
   13235 }
   13236 
   13237 /* Power down workaround on D3 */
   13238 static void
   13239 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13240 {
   13241 	uint32_t reg;
   13242 	int i;
   13243 
   13244 	for (i = 0; i < 2; i++) {
   13245 		/* Disable link */
   13246 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13247 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13248 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13249 
   13250 		/*
   13251 		 * Call gig speed drop workaround on Gig disable before
   13252 		 * accessing any PHY registers
   13253 		 */
   13254 		if (sc->sc_type == WM_T_ICH8)
   13255 			wm_gig_downshift_workaround_ich8lan(sc);
   13256 
   13257 		/* Write VR power-down enable */
   13258 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13259 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13260 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13261 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13262 
   13263 		/* Read it back and test */
   13264 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13265 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13266 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13267 			break;
   13268 
   13269 		/* Issue PHY reset and repeat at most one more time */
   13270 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13271 	}
   13272 }
   13273 
   13274 static void
   13275 wm_enable_wakeup(struct wm_softc *sc)
   13276 {
   13277 	uint32_t reg, pmreg;
   13278 	pcireg_t pmode;
   13279 
   13280 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13281 		device_xname(sc->sc_dev), __func__));
   13282 
   13283 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13284 		&pmreg, NULL) == 0)
   13285 		return;
   13286 
   13287 	/* Advertise the wakeup capability */
   13288 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13289 	    | CTRL_SWDPIN(3));
   13290 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13291 
   13292 	/* ICH workaround */
   13293 	switch (sc->sc_type) {
   13294 	case WM_T_ICH8:
   13295 	case WM_T_ICH9:
   13296 	case WM_T_ICH10:
   13297 	case WM_T_PCH:
   13298 	case WM_T_PCH2:
   13299 	case WM_T_PCH_LPT:
   13300 	case WM_T_PCH_SPT:
   13301 		/* Disable gig during WOL */
   13302 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13303 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13304 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13305 		if (sc->sc_type == WM_T_PCH)
   13306 			wm_gmii_reset(sc);
   13307 
   13308 		/* Power down workaround */
   13309 		if (sc->sc_phytype == WMPHY_82577) {
   13310 			struct mii_softc *child;
   13311 
   13312 			/* Assume that the PHY is copper */
   13313 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
    13314 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
    13315 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
    13316 				    (768 << 5) | 25, 0x0444); /* page 768, reg 25 */
   13317 		}
   13318 		break;
   13319 	default:
   13320 		break;
   13321 	}
   13322 
   13323 	/* Keep the laser running on fiber adapters */
   13324 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13325 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13326 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13327 		reg |= CTRL_EXT_SWDPIN(3);
   13328 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13329 	}
   13330 
   13331 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    13332 #if 0	/* for multicast packets */
   13333 	reg |= WUFC_MC;
   13334 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13335 #endif
   13336 
   13337 	if (sc->sc_type >= WM_T_PCH)
   13338 		wm_enable_phy_wakeup(sc);
   13339 	else {
   13340 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13341 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13342 	}
   13343 
   13344 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13345 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13346 		|| (sc->sc_type == WM_T_PCH2))
   13347 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13348 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13349 
   13350 	/* Request PME */
   13351 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13352 #if 0
   13353 	/* Disable WOL */
   13354 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13355 #else
   13356 	/* For WOL */
   13357 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13358 #endif
   13359 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13360 }
   13361 
    13362 /* LPLU (Low Power Link Up) */
   13363 
   13364 static void
   13365 wm_lplu_d0_disable(struct wm_softc *sc)
   13366 {
   13367 	struct mii_data *mii = &sc->sc_mii;
   13368 	uint32_t reg;
   13369 
   13370 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13371 		device_xname(sc->sc_dev), __func__));
   13372 
   13373 	if (sc->sc_phytype == WMPHY_IFE)
   13374 		return;
   13375 
   13376 	switch (sc->sc_type) {
   13377 	case WM_T_82571:
   13378 	case WM_T_82572:
   13379 	case WM_T_82573:
   13380 	case WM_T_82575:
   13381 	case WM_T_82576:
   13382 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13383 		reg &= ~PMR_D0_LPLU;
   13384 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13385 		break;
   13386 	case WM_T_82580:
   13387 	case WM_T_I350:
   13388 	case WM_T_I210:
   13389 	case WM_T_I211:
   13390 		reg = CSR_READ(sc, WMREG_PHPM);
   13391 		reg &= ~PHPM_D0A_LPLU;
   13392 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13393 		break;
   13394 	case WM_T_82574:
   13395 	case WM_T_82583:
   13396 	case WM_T_ICH8:
   13397 	case WM_T_ICH9:
   13398 	case WM_T_ICH10:
   13399 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13400 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13401 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13402 		CSR_WRITE_FLUSH(sc);
   13403 		break;
   13404 	case WM_T_PCH:
   13405 	case WM_T_PCH2:
   13406 	case WM_T_PCH_LPT:
   13407 	case WM_T_PCH_SPT:
   13408 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13409 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13410 		if (wm_phy_resetisblocked(sc) == false)
   13411 			reg |= HV_OEM_BITS_ANEGNOW;
   13412 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13413 		break;
   13414 	default:
   13415 		break;
   13416 	}
   13417 }
   13418 
    13419 /* EEE (Energy Efficient Ethernet) */
   13420 
   13421 static void
   13422 wm_set_eee_i350(struct wm_softc *sc)
   13423 {
   13424 	uint32_t ipcnfg, eeer;
   13425 
   13426 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13427 	eeer = CSR_READ(sc, WMREG_EEER);
   13428 
   13429 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13430 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13431 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13432 		    | EEER_LPI_FC);
   13433 	} else {
   13434 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13435 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13436 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13437 		    | EEER_LPI_FC);
   13438 	}
   13439 
   13440 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13441 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13442 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13443 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13444 }
   13445 
    13446 /*
    13447  * Workarounds (mainly PHY related).
    13448  * Most PHY workarounds live in the PHY drivers themselves.
    13449  */
   13450 
   13451 /* Work-around for 82566 Kumeran PCS lock loss */
   13452 static void
   13453 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13454 {
   13455 #if 0
   13456 	int miistatus, active, i;
   13457 	int reg;
   13458 
   13459 	miistatus = sc->sc_mii.mii_media_status;
   13460 
   13461 	/* If the link is not up, do nothing */
   13462 	if ((miistatus & IFM_ACTIVE) == 0)
   13463 		return;
   13464 
   13465 	active = sc->sc_mii.mii_media_active;
   13466 
    13467 	/* Nothing to do if the link speed is not 1Gbps */
   13468 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13469 		return;
   13470 
   13471 	for (i = 0; i < 10; i++) {
   13472 		/* read twice */
   13473 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13474 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13475 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13476 			goto out;	/* GOOD! */
   13477 
   13478 		/* Reset the PHY */
   13479 		wm_gmii_reset(sc);
   13480 		delay(5*1000);
   13481 	}
   13482 
   13483 	/* Disable GigE link negotiation */
   13484 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13485 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13486 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13487 
   13488 	/*
   13489 	 * Call gig speed drop workaround on Gig disable before accessing
   13490 	 * any PHY registers.
   13491 	 */
   13492 	wm_gig_downshift_workaround_ich8lan(sc);
   13493 
   13494 out:
   13495 	return;
   13496 #endif
   13497 }
   13498 
   13499 /* WOL from S5 stops working */
   13500 static void
   13501 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13502 {
   13503 	uint16_t kmrn_reg;
   13504 
   13505 	/* Only for igp3 */
   13506 	if (sc->sc_phytype == WMPHY_IGP_3) {
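          		/*
          		 * Pulse the Kumeran near-end loopback bit: write the
          		 * diagnostic register with the bit set, then write it
          		 * back with the bit cleared.
          		 */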
   13507 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13508 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13509 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13510 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13511 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13512 	}
   13513 }
   13514 
    13515 /*
    13516  * Workarounds for the PCH PHYs.
    13517  * XXX should these be moved to a new PHY driver?
    13518  */
   13519 static void
   13520 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13521 {
   13522 
   13523 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13524 		device_xname(sc->sc_dev), __func__));
   13525 	KASSERT(sc->sc_type == WM_T_PCH);
   13526 
   13527 	if (sc->sc_phytype == WMPHY_82577)
   13528 		wm_set_mdio_slow_mode_hv(sc);
   13529 
   13530 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13531 
   13532 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   13533 
   13534 	/* 82578 */
   13535 	if (sc->sc_phytype == WMPHY_82578) {
   13536 		struct mii_softc *child;
   13537 
   13538 		/*
   13539 		 * Return registers to default by doing a soft reset then
   13540 		 * writing 0x3140 to the control register
   13541 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13542 		 */
   13543 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13544 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13545 			PHY_RESET(child);
   13546 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13547 			    0x3140);
   13548 		}
   13549 	}
   13550 
   13551 	/* Select page 0 */
   13552 	sc->phy.acquire(sc);
   13553 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13554 	sc->phy.release(sc);
   13555 
    13556 	/*
    13557 	 * Configure the K1 Si workaround during PHY reset, assuming there
    13558 	 * is link, so that K1 is disabled when the link is at 1Gbps.
    13559 	 */
   13560 	wm_k1_gig_workaround_hv(sc, 1);
   13561 }
   13562 
   13563 static void
   13564 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13565 {
   13566 
   13567 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13568 		device_xname(sc->sc_dev), __func__));
   13569 	KASSERT(sc->sc_type == WM_T_PCH2);
   13570 
   13571 	wm_set_mdio_slow_mode_hv(sc);
   13572 }
   13573 
   13574 static int
   13575 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13576 {
   13577 	int k1_enable = sc->sc_nvm_k1_enabled;
   13578 
   13579 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13580 		device_xname(sc->sc_dev), __func__));
   13581 
   13582 	if (sc->phy.acquire(sc) != 0)
   13583 		return -1;
   13584 
   13585 	if (link) {
   13586 		k1_enable = 0;
   13587 
   13588 		/* Link stall fix for link up */
   13589 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13590 	} else {
   13591 		/* Link stall fix for link down */
   13592 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13593 	}
   13594 
   13595 	wm_configure_k1_ich8lan(sc, k1_enable);
   13596 	sc->phy.release(sc);
   13597 
   13598 	return 0;
   13599 }
   13600 
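          /* Put the HV (82577/82578) PHY's MDIO interface into slow mode. */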
   13601 static void
   13602 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13603 {
   13604 	uint32_t reg;
   13605 
   13606 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13607 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13608 	    reg | HV_KMRN_MDIO_SLOW);
   13609 }
   13610 
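          /*
           * K1 is a power-saving state of the Kumeran interconnect between
           * the MAC and the PHY.  Update the K1 setting and briefly force
           * the MAC speed so the new setting takes effect.
           */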
   13611 static void
   13612 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13613 {
   13614 	uint32_t ctrl, ctrl_ext, tmp;
   13615 	uint16_t kmrn_reg;
   13616 
   13617 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13618 
   13619 	if (k1_enable)
   13620 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13621 	else
   13622 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13623 
   13624 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13625 
   13626 	delay(20);
   13627 
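          	/*
          	 * Temporarily force the MAC speed with the speed-bypass bit
          	 * set, then restore the original CTRL/CTRL_EXT values.
          	 */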
   13628 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13629 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13630 
   13631 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13632 	tmp |= CTRL_FRCSPD;
   13633 
   13634 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13635 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13636 	CSR_WRITE_FLUSH(sc);
   13637 	delay(20);
   13638 
   13639 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13640 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13641 	CSR_WRITE_FLUSH(sc);
   13642 	delay(20);
   13643 }
   13644 
    13645 /* Special case: the 82575 needs manual initialization ... */
   13646 static void
   13647 wm_reset_init_script_82575(struct wm_softc *sc)
   13648 {
    13649 	/*
    13650 	 * Remark: this is untested code - we have no board without EEPROM.
    13651 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    13652 	 */
   13653 
   13654 	/* SerDes configuration via SERDESCTRL */
   13655 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13656 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13657 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13658 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13659 
   13660 	/* CCM configuration via CCMCTL register */
   13661 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13662 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13663 
   13664 	/* PCIe lanes configuration */
   13665 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13666 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13667 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13668 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13669 
   13670 	/* PCIe PLL Configuration */
   13671 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13672 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13673 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13674 }
   13675 
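          /*
           * Reload the MDICNFG external/shared MDIO settings from the NVM
           * on SGMII-equipped 82580s.
           */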
   13676 static void
   13677 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13678 {
   13679 	uint32_t reg;
   13680 	uint16_t nvmword;
   13681 	int rv;
   13682 
   13683 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13684 		return;
   13685 
   13686 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13687 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13688 	if (rv != 0) {
   13689 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13690 		    __func__);
   13691 		return;
   13692 	}
   13693 
   13694 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13695 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13696 		reg |= MDICNFG_DEST;
   13697 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13698 		reg |= MDICNFG_COM_MDIO;
   13699 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13700 }
   13701 
   13702 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13703 
   13704 static bool
   13705 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13706 {
   13707 	int i;
   13708 	uint32_t reg;
   13709 	uint16_t id1, id2;
   13710 
   13711 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13712 		device_xname(sc->sc_dev), __func__));
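          	/*
          	 * Try reading the PHY ID up to twice; the first access may
          	 * fail while the PHY is still switching access modes.
          	 */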
   13713 	id1 = id2 = 0xffff;
   13714 	for (i = 0; i < 2; i++) {
   13715 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13716 		if (MII_INVALIDID(id1))
   13717 			continue;
   13718 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13719 		if (MII_INVALIDID(id2))
   13720 			continue;
   13721 		break;
   13722 	}
   13723 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   13724 		goto out;
   13725 	}
   13726 
   13727 	if (sc->sc_type < WM_T_PCH_LPT) {
   13728 		sc->phy.release(sc);
   13729 		wm_set_mdio_slow_mode_hv(sc);
   13730 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13731 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13732 		sc->phy.acquire(sc);
   13733 	}
   13734 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   13735 		printf("XXX return with false\n");
   13736 		return false;
   13737 	}
   13738 out:
   13739 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13740 		/* Only unforce SMBus if ME is not active */
   13741 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13742 			/* Unforce SMBus mode in PHY */
   13743 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13744 			    CV_SMB_CTRL);
   13745 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13746 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13747 			    CV_SMB_CTRL, reg);
   13748 
   13749 			/* Unforce SMBus mode in MAC */
   13750 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13751 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13752 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13753 		}
   13754 	}
   13755 	return true;
   13756 }
   13757 
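          /*
           * Toggle the LANPHYPC pin value to power-cycle the PHY, then wait
           * for the PHY to come back up.
           */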
   13758 static void
   13759 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13760 {
   13761 	uint32_t reg;
   13762 	int i;
   13763 
   13764 	/* Set PHY Config Counter to 50msec */
   13765 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13766 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13767 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13768 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13769 
   13770 	/* Toggle LANPHYPC */
   13771 	reg = CSR_READ(sc, WMREG_CTRL);
   13772 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13773 	reg &= ~CTRL_LANPHYPC_VALUE;
   13774 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13775 	CSR_WRITE_FLUSH(sc);
   13776 	delay(1000);
   13777 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13778 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13779 	CSR_WRITE_FLUSH(sc);
   13780 
   13781 	if (sc->sc_type < WM_T_PCH_LPT)
   13782 		delay(50 * 1000);
   13783 	else {
   13784 		i = 20;
   13785 
   13786 		do {
   13787 			delay(5 * 1000);
   13788 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13789 		    && i--);
   13790 
   13791 		delay(30 * 1000);
   13792 	}
   13793 }
   13794 
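          /*
           * Program the Latency Tolerance Reporting (LTR) and OBFF values
           * from the link speed and the Rx packet buffer size.
           */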
   13795 static int
   13796 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13797 {
   13798 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13799 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13800 	uint32_t rxa;
   13801 	uint16_t scale = 0, lat_enc = 0;
   13802 	int32_t obff_hwm = 0;
   13803 	int64_t lat_ns, value;
   13804 
   13805 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13806 		device_xname(sc->sc_dev), __func__));
   13807 
   13808 	if (link) {
   13809 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13810 		uint32_t status;
   13811 		uint16_t speed;
   13812 		pcireg_t preg;
   13813 
   13814 		status = CSR_READ(sc, WMREG_STATUS);
   13815 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13816 		case STATUS_SPEED_10:
   13817 			speed = 10;
   13818 			break;
   13819 		case STATUS_SPEED_100:
   13820 			speed = 100;
   13821 			break;
   13822 		case STATUS_SPEED_1000:
   13823 			speed = 1000;
   13824 			break;
   13825 		default:
   13826 			device_printf(sc->sc_dev, "Unknown speed "
   13827 			    "(status = %08x)\n", status);
   13828 			return -1;
   13829 		}
   13830 
   13831 		/* Rx Packet Buffer Allocation size (KB) */
   13832 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13833 
   13834 		/*
   13835 		 * Determine the maximum latency tolerated by the device.
   13836 		 *
   13837 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13838 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13839 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13840 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13841 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13842 		 */
   13843 		lat_ns = ((int64_t)rxa * 1024 -
   13844 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   13845 			+ ETHER_HDR_LEN))) * 8 * 1000;
   13846 		if (lat_ns < 0)
   13847 			lat_ns = 0;
   13848 		else
   13849 			lat_ns /= speed;
   13850 		value = lat_ns;
   13851 
   13852 		while (value > LTRV_VALUE) {
    13853 			scale++;
   13854 			value = howmany(value, __BIT(5));
   13855 		}
   13856 		if (scale > LTRV_SCALE_MAX) {
   13857 			printf("%s: Invalid LTR latency scale %d\n",
   13858 			    device_xname(sc->sc_dev), scale);
   13859 			return -1;
   13860 		}
   13861 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
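          		/*
          		 * For example (hypothetical numbers): with rxa = 24KB,
          		 * an MTU of 1500 and a 1000Mb/s link, lat_ns =
          		 * (24 * 1024 - 2 * 1514) * 8 * 1000 / 1000 = 172384.
          		 * That exceeds LTRV_VALUE (1023), so the value is
          		 * scaled down twice: scale = 2, value = 169, i.e. an
          		 * encoded latency of 169 * 2^10 ns (~173us).
          		 */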
   13862 
   13863 		/* Determine the maximum latency tolerated by the platform */
   13864 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13865 		    WM_PCI_LTR_CAP_LPT);
   13866 		max_snoop = preg & 0xffff;
   13867 		max_nosnoop = preg >> 16;
   13868 
   13869 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13870 
   13871 		if (lat_enc > max_ltr_enc) {
   13872 			lat_enc = max_ltr_enc;
   13873 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   13874 			    * PCI_LTR_SCALETONS(
   13875 				    __SHIFTOUT(lat_enc,
   13876 					PCI_LTR_MAXSNOOPLAT_SCALE));
   13877 		}
   13878 
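          		/*
          		 * Convert the tolerated latency back into the amount
          		 * of Rx buffer (KB) consumed at the current speed; the
          		 * OBFF high water mark is what remains of rxa.
          		 */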
   13879 		if (lat_ns) {
   13880 			lat_ns *= speed * 1000;
   13881 			lat_ns /= 8;
   13882 			lat_ns /= 1000000000;
   13883 			obff_hwm = (int32_t)(rxa - lat_ns);
   13884 		}
   13885 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    13886 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    13887 			    " (rxa = %d, lat_ns = %d)\n",
   13888 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   13889 			return -1;
   13890 		}
   13891 	}
    13892 	/* The Snoop and No-Snoop latencies are the same */
   13893 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13894 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13895 
   13896 	/* Set OBFF high water mark */
   13897 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   13898 	reg |= obff_hwm;
   13899 	CSR_WRITE(sc, WMREG_SVT, reg);
   13900 
   13901 	/* Enable OBFF */
   13902 	reg = CSR_READ(sc, WMREG_SVCR);
   13903 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   13904 	CSR_WRITE(sc, WMREG_SVCR, reg);
   13905 
   13906 	return 0;
   13907 }
   13908 
   13909 /*
   13910  * I210 Errata 25 and I211 Errata 10
   13911  * Slow System Clock.
   13912  */
   13913 static void
   13914 wm_pll_workaround_i210(struct wm_softc *sc)
   13915 {
   13916 	uint32_t mdicnfg, wuc;
   13917 	uint32_t reg;
   13918 	pcireg_t pcireg;
   13919 	uint32_t pmreg;
   13920 	uint16_t nvmword, tmp_nvmword;
   13921 	int phyval;
   13922 	bool wa_done = false;
   13923 	int i;
   13924 
   13925 	/* Save WUC and MDICNFG registers */
   13926 	wuc = CSR_READ(sc, WMREG_WUC);
   13927 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13928 
   13929 	reg = mdicnfg & ~MDICNFG_DEST;
   13930 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13931 
   13932 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13933 		nvmword = INVM_DEFAULT_AL;
   13934 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13935 
   13936 	/* Get Power Management cap offset */
   13937 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13938 		&pmreg, NULL) == 0)
   13939 		return;
   13940 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13941 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13942 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13943 
   13944 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13945 			break; /* OK */
   13946 		}
   13947 
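          		/*
          		 * The PLL is still unconfigured: reset the internal
          		 * PHY, load the modified iNVM autoload word, bounce
          		 * the function through D3hot and back to D0 to rerun
          		 * autoload, then restore the original word.
          		 */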
   13948 		wa_done = true;
   13949 		/* Directly reset the internal PHY */
   13950 		reg = CSR_READ(sc, WMREG_CTRL);
   13951 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13952 
   13953 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13954 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13955 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13956 
   13957 		CSR_WRITE(sc, WMREG_WUC, 0);
   13958 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13959 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13960 
   13961 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13962 		    pmreg + PCI_PMCSR);
   13963 		pcireg |= PCI_PMCSR_STATE_D3;
   13964 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13965 		    pmreg + PCI_PMCSR, pcireg);
   13966 		delay(1000);
   13967 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13968 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13969 		    pmreg + PCI_PMCSR, pcireg);
   13970 
   13971 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13972 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13973 
   13974 		/* Restore WUC register */
   13975 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13976 	}
   13977 
   13978 	/* Restore MDICNFG setting */
   13979 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13980 	if (wa_done)
   13981 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13982 }
   13983 
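          /*
           * Quirk for legacy (INTx) interrupts on PCH_SPT (I219): keep the
           * side clock ungated and disable IOSF sideband clock gating and
           * clock requests.
           */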
   13984 static void
   13985 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   13986 {
   13987 	uint32_t reg;
   13988 
   13989 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13990 		device_xname(sc->sc_dev), __func__));
   13991 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   13992 
   13993 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13994 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   13995 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13996 
   13997 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   13998 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   13999 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14000 }
   14001