      1 /*	$NetBSD: if_wm.c,v 1.517 2017/06/26 04:22:46 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Split header buffer for newer descriptors
     79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.517 2017/06/26 04:22:46 msaitoh Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #include "opt_if_wm.h"
     92 #endif
     93 
     94 #include <sys/param.h>
     95 #include <sys/systm.h>
     96 #include <sys/callout.h>
     97 #include <sys/mbuf.h>
     98 #include <sys/malloc.h>
     99 #include <sys/kmem.h>
    100 #include <sys/kernel.h>
    101 #include <sys/socket.h>
    102 #include <sys/ioctl.h>
    103 #include <sys/errno.h>
    104 #include <sys/device.h>
    105 #include <sys/queue.h>
    106 #include <sys/syslog.h>
    107 #include <sys/interrupt.h>
    108 #include <sys/cpu.h>
    109 #include <sys/pcq.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/miivar.h>
    132 #include <dev/mii/miidevs.h>
    133 #include <dev/mii/mii_bitbang.h>
    134 #include <dev/mii/ikphyreg.h>
    135 #include <dev/mii/igphyreg.h>
    136 #include <dev/mii/igphyvar.h>
    137 #include <dev/mii/inbmphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
    146 #ifdef WM_DEBUG
    147 #define	WM_DEBUG_LINK		__BIT(0)
    148 #define	WM_DEBUG_TX		__BIT(1)
    149 #define	WM_DEBUG_RX		__BIT(2)
    150 #define	WM_DEBUG_GMII		__BIT(3)
    151 #define	WM_DEBUG_MANAGE		__BIT(4)
    152 #define	WM_DEBUG_NVM		__BIT(5)
    153 #define	WM_DEBUG_INIT		__BIT(6)
    154 #define	WM_DEBUG_LOCK		__BIT(7)
    155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    157 
    158 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    159 #else
    160 #define	DPRINTF(x, y)	/* nothing */
    161 #endif /* WM_DEBUG */
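
        /*
         * DPRINTF() takes a debug-class mask and a parenthesized printf
         * argument list, so calls disappear entirely when WM_DEBUG is not
         * defined.  An illustrative (hypothetical) call site:
         *
         *	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: status changed\n",
         *	    device_xname(sc->sc_dev)));
         */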
    162 
    163 #ifdef NET_MPSAFE
    164 #define WM_MPSAFE	1
    165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    166 #else
    167 #define CALLOUT_FLAGS	0
    168 #endif
    169 
    170 /*
    171  * Maximum number of interrupts this device driver can use.
    172  */
    173 #define WM_MAX_NQUEUEINTR	16
    174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    175 
    176 #ifndef WM_DISABLE_MSI
    177 #define	WM_DISABLE_MSI 0
    178 #endif
    179 #ifndef WM_DISABLE_MSIX
    180 #define	WM_DISABLE_MSIX 0
    181 #endif
    182 
    183 int wm_disable_msi = WM_DISABLE_MSI;
    184 int wm_disable_msix = WM_DISABLE_MSIX;
    185 
    186 /*
    187  * Transmit descriptor list size.  Due to errata, we can only have
    188  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    189  * on >= 82544.  We tell the upper layers that they can queue a lot
    190  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    191  * of them at a time.
    192  *
    193  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    194  * chains containing many small mbufs have been observed in zero-copy
    195  * situations with jumbo frames.
    196  */
    197 #define	WM_NTXSEGS		256
    198 #define	WM_IFQUEUELEN		256
    199 #define	WM_TXQUEUELEN_MAX	64
    200 #define	WM_TXQUEUELEN_MAX_82547	16
    201 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    202 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    203 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    204 #define	WM_NTXDESC_82542	256
    205 #define	WM_NTXDESC_82544	4096
    206 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    207 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    208 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    209 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    210 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
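
        /*
         * Both sizes are powers of two, so the WM_NEXTTX()/WM_NEXTTXS()
         * index arithmetic wraps with a simple mask.  For example, with
         * WM_NTXDESC(txq) == 256:
         *
         *	WM_NEXTTX(txq, 254) == 255
         *	WM_NEXTTX(txq, 255) == (255 + 1) & 0xff == 0
         */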
    211 
    212 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    213 
    214 #define	WM_TXINTERQSIZE		256
    215 
    216 /*
    217  * Receive descriptor list size.  We have one Rx buffer for a normal-
    218  * sized packet.  Jumbo packets consume 5 Rx buffers for a full-sized
    219  * packet.  We allocate 256 receive descriptors, each with a 2k
    220  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    221  */
    222 #define	WM_NRXDESC		256
    223 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    224 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    225 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
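
        /*
         * A quick sanity check of the sizing above, assuming 9k jumbo
         * frames and 2k (MCLBYTES) clusters:
         *
         *	buffers per jumbo frame:  howmany(9014, 2048) == 5
         *	jumbo frames per ring:    WM_NRXDESC / 5 == 51, i.e. ~50
         */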
    226 
    227 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    228 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    229 #endif
    230 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    231 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    232 #endif
    233 
    234 typedef union txdescs {
    235 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    236 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    237 } txdescs_t;
    238 
    239 typedef union rxdescs {
    240 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    241 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    242 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    243 } rxdescs_t;
    244 
    245 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    246 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
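
        /*
         * Each ring is one contiguous allocation, so the byte offset of
         * descriptor x is just x times the per-chip descriptor size; e.g.
         * with 16-byte legacy Tx descriptors, WM_CDTXOFF(txq, 3) == 48.
         */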
    247 
    248 /*
    249  * Software state for transmit jobs.
    250  */
    251 struct wm_txsoft {
    252 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    253 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    254 	int txs_firstdesc;		/* first descriptor in packet */
    255 	int txs_lastdesc;		/* last descriptor in packet */
    256 	int txs_ndesc;			/* # of descriptors used */
    257 };
    258 
    259 /*
    260  * Software state for receive buffers.  Each descriptor gets a
    261  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    262  * more than one buffer, we chain them together.
    263  */
    264 struct wm_rxsoft {
    265 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    266 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    267 };
    268 
    269 #define WM_LINKUP_TIMEOUT	50
    270 
    271 static uint16_t swfwphysem[] = {
    272 	SWFW_PHY0_SM,
    273 	SWFW_PHY1_SM,
    274 	SWFW_PHY2_SM,
    275 	SWFW_PHY3_SM
    276 };
    277 
    278 static const uint32_t wm_82580_rxpbs_table[] = {
    279 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    280 };
    281 
    282 struct wm_softc;
    283 
    284 #ifdef WM_EVENT_COUNTERS
    285 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    286 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    287 	struct evcnt qname##_ev_##evname;
    288 
    289 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    290 	do {								\
    291 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    292 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    293 		    "%s%02d%s", #qname, (qnum), #evname);		\
    294 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    295 		    (evtype), NULL, (xname),				\
    296 		    (q)->qname##_##evname##_evcnt_name);		\
    297 	} while (0)
    298 
    299 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    300 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    301 
    302 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    303 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    304 
    305 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    306 	evcnt_detach(&(q)->qname##_ev_##evname);
    307 #endif /* WM_EVENT_COUNTERS */
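
        /*
         * Note: macro parameters are not expanded inside string literals,
         * so the name buffer declared by WM_Q_EVCNT_DEFINE() is always
         * sizeof("qname##XX##evname") == 18 bytes, which is enough for
         * generated names such as "txq00txdw".  For example,
         *
         *	WM_Q_EVCNT_DEFINE(txq, txdw)
         *
         * expands (roughly) to:
         *
         *	char txq_txdw_evcnt_name[18];
         *	struct evcnt txq_ev_txdw;
         */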
    308 
    309 struct wm_txqueue {
    310 	kmutex_t *txq_lock;		/* lock for tx operations */
    311 
    312 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    313 
    314 	/* Software state for the transmit descriptors. */
    315 	int txq_num;			/* must be a power of two */
    316 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    317 
    318 	/* TX control data structures. */
    319 	int txq_ndesc;			/* must be a power of two */
    320 	size_t txq_descsize;		/* size of a Tx descriptor */
    321 	txdescs_t *txq_descs_u;
    322 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    323 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    324 	int txq_desc_rseg;		/* real number of control segments */
    325 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    326 #define	txq_descs	txq_descs_u->sctxu_txdescs
    327 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    328 
    329 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    330 
    331 	int txq_free;			/* number of free Tx descriptors */
    332 	int txq_next;			/* next ready Tx descriptor */
    333 
    334 	int txq_sfree;			/* number of free Tx jobs */
    335 	int txq_snext;			/* next free Tx job */
    336 	int txq_sdirty;			/* dirty Tx jobs */
    337 
    338 	/* These 4 variables are used only on the 82547. */
    339 	int txq_fifo_size;		/* Tx FIFO size */
    340 	int txq_fifo_head;		/* current head of FIFO */
    341 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    342 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    343 
    344 	/*
    345 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    346 	 * CPUs.  This queue mediates between them without blocking.
    347 	 */
    348 	pcq_t *txq_interq;
    349 
    350 	/*
    351 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    352 	 * to manage the Tx H/W queue's busy flag.
    353 	 */
    354 	int txq_flags;			/* flags for H/W queue, see below */
    355 #define	WM_TXQ_NO_SPACE	0x1
    356 
    357 	bool txq_stopping;
    358 
    359 	uint32_t txq_packets;		/* for AIM */
    360 	uint32_t txq_bytes;		/* for AIM */
    361 #ifdef WM_EVENT_COUNTERS
    362 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    363 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    364 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    365 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    366 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    367 						/* XXX not used? */
    368 
    369 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    370 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    371 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    372 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    373 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    374 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    375 
    376 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    377 
    378 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    379 
    380 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    381 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    382 #endif /* WM_EVENT_COUNTERS */
    383 };
    384 
    385 struct wm_rxqueue {
    386 	kmutex_t *rxq_lock;		/* lock for rx operations */
    387 
    388 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    389 
    390 	/* Software state for the receive descriptors. */
    391 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    392 
    393 	/* RX control data structures. */
    394 	int rxq_ndesc;			/* must be a power of two */
    395 	size_t rxq_descsize;		/* size of an Rx descriptor */
    396 	rxdescs_t *rxq_descs_u;
    397 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    398 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    399 	int rxq_desc_rseg;		/* real number of control segments */
    400 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    401 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    402 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    403 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    404 
    405 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    406 
    407 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    408 	int rxq_discard;
    409 	int rxq_len;
    410 	struct mbuf *rxq_head;
    411 	struct mbuf *rxq_tail;
    412 	struct mbuf **rxq_tailp;
    413 
    414 	bool rxq_stopping;
    415 
    416 	uint32_t rxq_packets;		/* for AIM */
    417 	uint32_t rxq_bytes;		/* for AIM */
    418 #ifdef WM_EVENT_COUNTERS
    419 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    420 
    421 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    422 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    423 #endif
    424 };
    425 
    426 struct wm_queue {
    427 	int wmq_id;			/* index of transmit and receive queues */
    428 	int wmq_intr_idx;		/* index of MSI-X tables */
    429 
    430 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    431 	bool wmq_set_itr;
    432 
    433 	struct wm_txqueue wmq_txq;
    434 	struct wm_rxqueue wmq_rxq;
    435 
    436 	void *wmq_si;
    437 };
    438 
    439 struct wm_phyop {
    440 	int (*acquire)(struct wm_softc *);
    441 	void (*release)(struct wm_softc *);
    442 	int reset_delay_us;
    443 };
    444 
    445 /*
    446  * Software state per device.
    447  */
    448 struct wm_softc {
    449 	device_t sc_dev;		/* generic device information */
    450 	bus_space_tag_t sc_st;		/* bus space tag */
    451 	bus_space_handle_t sc_sh;	/* bus space handle */
    452 	bus_size_t sc_ss;		/* bus space size */
    453 	bus_space_tag_t sc_iot;		/* I/O space tag */
    454 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    455 	bus_size_t sc_ios;		/* I/O space size */
    456 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    457 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    458 	bus_size_t sc_flashs;		/* flash registers space size */
    459 	off_t sc_flashreg_offset;	/*
    460 					 * offset to flash registers from
    461 					 * start of BAR
    462 					 */
    463 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    464 
    465 	struct ethercom sc_ethercom;	/* ethernet common data */
    466 	struct mii_data sc_mii;		/* MII/media information */
    467 
    468 	pci_chipset_tag_t sc_pc;
    469 	pcitag_t sc_pcitag;
    470 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    471 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    472 
    473 	uint16_t sc_pcidevid;		/* PCI device ID */
    474 	wm_chip_type sc_type;		/* MAC type */
    475 	int sc_rev;			/* MAC revision */
    476 	wm_phy_type sc_phytype;		/* PHY type */
    477 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    478 #define	WM_MEDIATYPE_UNKNOWN		0x00
    479 #define	WM_MEDIATYPE_FIBER		0x01
    480 #define	WM_MEDIATYPE_COPPER		0x02
    481 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    482 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    483 	int sc_flags;			/* flags; see below */
    484 	int sc_if_flags;		/* last if_flags */
    485 	int sc_flowflags;		/* 802.3x flow control flags */
    486 	int sc_align_tweak;
    487 
    488 	void *sc_ihs[WM_MAX_NINTR];	/*
    489 					 * interrupt cookie.
    490 					 * - legacy and msi use sc_ihs[0] only
    491 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    492 					 */
    493 	pci_intr_handle_t *sc_intrs;	/*
    494 					 * legacy and msi use sc_intrs[0] only
    495 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    496 					 */
    497 	int sc_nintrs;			/* number of interrupts */
    498 
    499 	int sc_link_intr_idx;		/* index of MSI-X tables */
    500 
    501 	callout_t sc_tick_ch;		/* tick callout */
    502 	bool sc_core_stopping;
    503 
    504 	int sc_nvm_ver_major;
    505 	int sc_nvm_ver_minor;
    506 	int sc_nvm_ver_build;
    507 	int sc_nvm_addrbits;		/* NVM address bits */
    508 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    509 	int sc_ich8_flash_base;
    510 	int sc_ich8_flash_bank_size;
    511 	int sc_nvm_k1_enabled;
    512 
    513 	int sc_nqueues;
    514 	struct wm_queue *sc_queue;
    515 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    516 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    517 
    518 	int sc_affinity_offset;
    519 
    520 #ifdef WM_EVENT_COUNTERS
    521 	/* Event counters. */
    522 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    523 
    524 	/* WM_T_82542_2_1 only */
    525 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    526 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    527 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    528 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    529 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    530 #endif /* WM_EVENT_COUNTERS */
    531 
    532 	/* This variable is used only on the 82547. */
    533 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    534 
    535 	uint32_t sc_ctrl;		/* prototype CTRL register */
    536 #if 0
    537 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    538 #endif
    539 	uint32_t sc_icr;		/* prototype interrupt bits */
    540 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    541 	uint32_t sc_tctl;		/* prototype TCTL register */
    542 	uint32_t sc_rctl;		/* prototype RCTL register */
    543 	uint32_t sc_txcw;		/* prototype TXCW register */
    544 	uint32_t sc_tipg;		/* prototype TIPG register */
    545 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    546 	uint32_t sc_pba;		/* prototype PBA register */
    547 
    548 	int sc_tbi_linkup;		/* TBI link status */
    549 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    550 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    551 
    552 	int sc_mchash_type;		/* multicast filter offset */
    553 
    554 	krndsource_t rnd_source;	/* random source */
    555 
    556 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    557 
    558 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    559 	kmutex_t *sc_ich_phymtx;	/*
    560 					 * 82574/82583/ICH/PCH specific PHY
    561 					 * mutex. For 82574/82583, the mutex
    562 					 * is used for both PHY and NVM.
    563 					 */
    564 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    565 
    566 	struct wm_phyop phy;
    567 };
    568 
    569 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    570 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    571 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    572 
    573 #define	WM_RXCHAIN_RESET(rxq)						\
    574 do {									\
    575 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    576 	*(rxq)->rxq_tailp = NULL;					\
    577 	(rxq)->rxq_len = 0;						\
    578 } while (/*CONSTCOND*/0)
    579 
    580 #define	WM_RXCHAIN_LINK(rxq, m)						\
    581 do {									\
    582 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    583 	(rxq)->rxq_tailp = &(m)->m_next;				\
    584 } while (/*CONSTCOND*/0)
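
        /*
         * rxq_tailp always points at the slot (rxq_head, or the previous
         * mbuf's m_next) where the next mbuf should be stored, so appends
         * are O(1).  A minimal sketch of assembling a two-buffer packet:
         *
         *	WM_RXCHAIN_RESET(rxq);
         *	WM_RXCHAIN_LINK(rxq, m0);	-> rxq_head == m0
         *	WM_RXCHAIN_LINK(rxq, m1);	-> m0->m_next == m1
         */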
    585 
    586 #ifdef WM_EVENT_COUNTERS
    587 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    588 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    589 
    590 #define WM_Q_EVCNT_INCR(qname, evname)			\
    591 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    592 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    593 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    594 #else /* !WM_EVENT_COUNTERS */
    595 #define	WM_EVCNT_INCR(ev)	/* nothing */
    596 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    597 
    598 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    599 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    600 #endif /* !WM_EVENT_COUNTERS */
    601 
    602 #define	CSR_READ(sc, reg)						\
    603 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    604 #define	CSR_WRITE(sc, reg, val)						\
    605 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    606 #define	CSR_WRITE_FLUSH(sc)						\
    607 	(void) CSR_READ((sc), WMREG_STATUS)
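
        /*
         * CSR_WRITE_FLUSH() forces posted register writes out to the chip
         * by issuing a harmless read of the STATUS register.  An
         * illustrative pattern, for when a write must take effect before
         * a delay:
         *
         *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
         *	CSR_WRITE_FLUSH(sc);
         *	delay(100);
         */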
    608 
    609 #define ICH8_FLASH_READ32(sc, reg)					\
    610 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    611 	    (reg) + sc->sc_flashreg_offset)
    612 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    613 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    614 	    (reg) + sc->sc_flashreg_offset, (data))
    615 
    616 #define ICH8_FLASH_READ16(sc, reg)					\
    617 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    618 	    (reg) + sc->sc_flashreg_offset)
    619 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    620 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    621 	    (reg) + sc->sc_flashreg_offset, (data))
    622 
    623 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    624 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    625 
    626 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    627 #define	WM_CDTXADDR_HI(txq, x)						\
    628 	(sizeof(bus_addr_t) == 8 ?					\
    629 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    630 
    631 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    632 #define	WM_CDRXADDR_HI(rxq, x)						\
    633 	(sizeof(bus_addr_t) == 8 ?					\
    634 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
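
        /*
         * Descriptor base addresses are programmed into the chip as two
         * 32-bit halves.  With a 64-bit bus_addr_t of, say, 0x123456789000,
         * the macros above yield WM_CDTXADDR_LO() == 0x56789000 and
         * WM_CDTXADDR_HI() == 0x1234; with a 32-bit bus_addr_t the high
         * half is always 0.
         */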
    635 
    636 /*
    637  * Register read/write functions,
    638  * other than CSR_{READ|WRITE}().
    639  */
    640 #if 0
    641 static inline uint32_t wm_io_read(struct wm_softc *, int);
    642 #endif
    643 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    644 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    645 	uint32_t, uint32_t);
    646 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    647 
    648 /*
    649  * Descriptor sync/init functions.
    650  */
    651 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    652 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    653 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    654 
    655 /*
    656  * Device driver interface functions and commonly used functions:
    657  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    658  */
    659 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    660 static int	wm_match(device_t, cfdata_t, void *);
    661 static void	wm_attach(device_t, device_t, void *);
    662 static int	wm_detach(device_t, int);
    663 static bool	wm_suspend(device_t, const pmf_qual_t *);
    664 static bool	wm_resume(device_t, const pmf_qual_t *);
    665 static void	wm_watchdog(struct ifnet *);
    666 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    667 static void	wm_tick(void *);
    668 static int	wm_ifflags_cb(struct ethercom *);
    669 static int	wm_ioctl(struct ifnet *, u_long, void *);
    670 /* MAC address related */
    671 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    672 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    673 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    674 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    675 static void	wm_set_filter(struct wm_softc *);
    676 /* Reset and init related */
    677 static void	wm_set_vlan(struct wm_softc *);
    678 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    679 static void	wm_get_auto_rd_done(struct wm_softc *);
    680 static void	wm_lan_init_done(struct wm_softc *);
    681 static void	wm_get_cfg_done(struct wm_softc *);
    682 static void	wm_phy_post_reset(struct wm_softc *);
    683 static void	wm_initialize_hardware_bits(struct wm_softc *);
    684 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    685 static void	wm_reset_phy(struct wm_softc *);
    686 static void	wm_flush_desc_rings(struct wm_softc *);
    687 static void	wm_reset(struct wm_softc *);
    688 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    689 static void	wm_rxdrain(struct wm_rxqueue *);
    690 static void	wm_rss_getkey(uint8_t *);
    691 static void	wm_init_rss(struct wm_softc *);
    692 static void	wm_adjust_qnum(struct wm_softc *, int);
    693 static inline bool	wm_is_using_msix(struct wm_softc *);
    694 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    695 static int	wm_softint_establish(struct wm_softc *, int, int);
    696 static int	wm_setup_legacy(struct wm_softc *);
    697 static int	wm_setup_msix(struct wm_softc *);
    698 static int	wm_init(struct ifnet *);
    699 static int	wm_init_locked(struct ifnet *);
    700 static void	wm_turnon(struct wm_softc *);
    701 static void	wm_turnoff(struct wm_softc *);
    702 static void	wm_stop(struct ifnet *, int);
    703 static void	wm_stop_locked(struct ifnet *, int);
    704 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    705 static void	wm_82547_txfifo_stall(void *);
    706 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    707 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    708 /* DMA related */
    709 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    710 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    711 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    712 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    713     struct wm_txqueue *);
    714 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    715 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    716 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    717     struct wm_rxqueue *);
    718 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    719 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    720 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    721 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    722 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    723 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    724 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    725     struct wm_txqueue *);
    726 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    727     struct wm_rxqueue *);
    728 static int	wm_alloc_txrx_queues(struct wm_softc *);
    729 static void	wm_free_txrx_queues(struct wm_softc *);
    730 static int	wm_init_txrx_queues(struct wm_softc *);
    731 /* Start */
    732 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    733     struct wm_txsoft *, uint32_t *, uint8_t *);
    734 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    735 static void	wm_start(struct ifnet *);
    736 static void	wm_start_locked(struct ifnet *);
    737 static int	wm_transmit(struct ifnet *, struct mbuf *);
    738 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    739 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    740 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    741     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    742 static void	wm_nq_start(struct ifnet *);
    743 static void	wm_nq_start_locked(struct ifnet *);
    744 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    745 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    746 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    747 static void	wm_deferred_start_locked(struct wm_txqueue *);
    748 static void	wm_handle_queue(void *);
    749 /* Interrupt */
    750 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    751 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    752 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    753 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    754 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    755 static void	wm_linkintr(struct wm_softc *, uint32_t);
    756 static int	wm_intr_legacy(void *);
    757 static inline void	wm_txrxintr_disable(struct wm_queue *);
    758 static inline void	wm_txrxintr_enable(struct wm_queue *);
    759 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    760 static int	wm_txrxintr_msix(void *);
    761 static int	wm_linkintr_msix(void *);
    762 
    763 /*
    764  * Media related.
    765  * GMII, SGMII, TBI, SERDES and SFP.
    766  */
    767 /* Common */
    768 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    769 /* GMII related */
    770 static void	wm_gmii_reset(struct wm_softc *);
    771 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    772 static int	wm_get_phy_id_82575(struct wm_softc *);
    773 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    774 static int	wm_gmii_mediachange(struct ifnet *);
    775 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    776 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    777 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    778 static int	wm_gmii_i82543_readreg(device_t, int, int);
    779 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    780 static int	wm_gmii_mdic_readreg(device_t, int, int);
    781 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    782 static int	wm_gmii_i82544_readreg(device_t, int, int);
    783 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    784 static int	wm_gmii_i80003_readreg(device_t, int, int);
    785 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    786 static int	wm_gmii_bm_readreg(device_t, int, int);
    787 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    788 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    789 static int	wm_gmii_hv_readreg(device_t, int, int);
    790 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    791 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    792 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    793 static int	wm_gmii_82580_readreg(device_t, int, int);
    794 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    795 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    796 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    797 static void	wm_gmii_statchg(struct ifnet *);
    798 /*
    799  * Kumeran related (80003, ICH* and PCH*).
    800  * These functions are not for accessing MII registers but for accessing
    801  * Kumeran-specific registers.
    802  */
    803 static int	wm_kmrn_readreg(struct wm_softc *, int);
    804 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    805 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    806 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    807 /* SGMII */
    808 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    809 static int	wm_sgmii_readreg(device_t, int, int);
    810 static void	wm_sgmii_writereg(device_t, int, int, int);
    811 /* TBI related */
    812 static void	wm_tbi_mediainit(struct wm_softc *);
    813 static int	wm_tbi_mediachange(struct ifnet *);
    814 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    815 static int	wm_check_for_link(struct wm_softc *);
    816 static void	wm_tbi_tick(struct wm_softc *);
    817 /* SERDES related */
    818 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    819 static int	wm_serdes_mediachange(struct ifnet *);
    820 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    821 static void	wm_serdes_tick(struct wm_softc *);
    822 /* SFP related */
    823 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    824 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    825 
    826 /*
    827  * NVM related.
    828  * Microwire, SPI (w/wo EERD) and Flash.
    829  */
    830 /* Misc functions */
    831 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    832 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    833 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    834 /* Microwire */
    835 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    836 /* SPI */
    837 static int	wm_nvm_ready_spi(struct wm_softc *);
    838 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    839 /* Reading with EERD */
    840 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    841 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    842 /* Flash */
    843 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    844     unsigned int *);
    845 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    846 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    847 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    848 	uint32_t *);
    849 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    850 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    851 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    852 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    853 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    854 /* iNVM */
    855 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    856 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    857 /* Locking, NVM type detection, checksum validation and reading */
    858 static int	wm_nvm_acquire(struct wm_softc *);
    859 static void	wm_nvm_release(struct wm_softc *);
    860 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    861 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    862 static int	wm_nvm_validate_checksum(struct wm_softc *);
    863 static void	wm_nvm_version_invm(struct wm_softc *);
    864 static void	wm_nvm_version(struct wm_softc *);
    865 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    866 
    867 /*
    868  * Hardware semaphores.
    869  * Very complex...
    870  */
    871 static int	wm_get_null(struct wm_softc *);
    872 static void	wm_put_null(struct wm_softc *);
    873 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    874 static void	wm_put_swsm_semaphore(struct wm_softc *);
    875 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    876 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    877 static int	wm_get_phy_82575(struct wm_softc *);
    878 static void	wm_put_phy_82575(struct wm_softc *);
    879 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    880 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    881 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    882 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    883 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    884 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    885 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    886 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    887 
    888 /*
    889  * Management mode and power management related subroutines.
    890  * BMC, AMT, suspend/resume and EEE.
    891  */
    892 #if 0
    893 static int	wm_check_mng_mode(struct wm_softc *);
    894 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    895 static int	wm_check_mng_mode_82574(struct wm_softc *);
    896 static int	wm_check_mng_mode_generic(struct wm_softc *);
    897 #endif
    898 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    899 static bool	wm_phy_resetisblocked(struct wm_softc *);
    900 static void	wm_get_hw_control(struct wm_softc *);
    901 static void	wm_release_hw_control(struct wm_softc *);
    902 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    903 static void	wm_smbustopci(struct wm_softc *);
    904 static void	wm_init_manageability(struct wm_softc *);
    905 static void	wm_release_manageability(struct wm_softc *);
    906 static void	wm_get_wakeup(struct wm_softc *);
    907 static void	wm_ulp_disable(struct wm_softc *);
    908 static void	wm_enable_phy_wakeup(struct wm_softc *);
    909 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    910 static void	wm_enable_wakeup(struct wm_softc *);
    911 /* LPLU (Low Power Link Up) */
    912 static void	wm_lplu_d0_disable(struct wm_softc *);
    913 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    914 /* EEE */
    915 static void	wm_set_eee_i350(struct wm_softc *);
    916 
    917 /*
    918  * Workarounds (mainly PHY related).
    919  * Basically, PHY workarounds are in the PHY drivers.
    920  */
    921 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    922 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    923 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    924 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    925 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    926 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    927 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    928 static void	wm_reset_init_script_82575(struct wm_softc *);
    929 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    930 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    931 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    932 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    933 static void	wm_pll_workaround_i210(struct wm_softc *);
    934 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    935 
    936 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    937     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    938 
    939 /*
    940  * Devices supported by this driver.
    941  */
    942 static const struct wm_product {
    943 	pci_vendor_id_t		wmp_vendor;
    944 	pci_product_id_t	wmp_product;
    945 	const char		*wmp_name;
    946 	wm_chip_type		wmp_type;
    947 	uint32_t		wmp_flags;
    948 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    949 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    950 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    951 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    952 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    953 } wm_products[] = {
    954 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    955 	  "Intel i82542 1000BASE-X Ethernet",
    956 	  WM_T_82542_2_1,	WMP_F_FIBER },
    957 
    958 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    959 	  "Intel i82543GC 1000BASE-X Ethernet",
    960 	  WM_T_82543,		WMP_F_FIBER },
    961 
    962 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    963 	  "Intel i82543GC 1000BASE-T Ethernet",
    964 	  WM_T_82543,		WMP_F_COPPER },
    965 
    966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    967 	  "Intel i82544EI 1000BASE-T Ethernet",
    968 	  WM_T_82544,		WMP_F_COPPER },
    969 
    970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    971 	  "Intel i82544EI 1000BASE-X Ethernet",
    972 	  WM_T_82544,		WMP_F_FIBER },
    973 
    974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    975 	  "Intel i82544GC 1000BASE-T Ethernet",
    976 	  WM_T_82544,		WMP_F_COPPER },
    977 
    978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    979 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    980 	  WM_T_82544,		WMP_F_COPPER },
    981 
    982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    983 	  "Intel i82540EM 1000BASE-T Ethernet",
    984 	  WM_T_82540,		WMP_F_COPPER },
    985 
    986 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    987 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    988 	  WM_T_82540,		WMP_F_COPPER },
    989 
    990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    991 	  "Intel i82540EP 1000BASE-T Ethernet",
    992 	  WM_T_82540,		WMP_F_COPPER },
    993 
    994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    995 	  "Intel i82540EP 1000BASE-T Ethernet",
    996 	  WM_T_82540,		WMP_F_COPPER },
    997 
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    999 	  "Intel i82540EP 1000BASE-T Ethernet",
   1000 	  WM_T_82540,		WMP_F_COPPER },
   1001 
   1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1003 	  "Intel i82545EM 1000BASE-T Ethernet",
   1004 	  WM_T_82545,		WMP_F_COPPER },
   1005 
   1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1007 	  "Intel i82545GM 1000BASE-T Ethernet",
   1008 	  WM_T_82545_3,		WMP_F_COPPER },
   1009 
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1011 	  "Intel i82545GM 1000BASE-X Ethernet",
   1012 	  WM_T_82545_3,		WMP_F_FIBER },
   1013 
   1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1015 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1016 	  WM_T_82545_3,		WMP_F_SERDES },
   1017 
   1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1019 	  "Intel i82546EB 1000BASE-T Ethernet",
   1020 	  WM_T_82546,		WMP_F_COPPER },
   1021 
   1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1023 	  "Intel i82546EB 1000BASE-T Ethernet",
   1024 	  WM_T_82546,		WMP_F_COPPER },
   1025 
   1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1027 	  "Intel i82545EM 1000BASE-X Ethernet",
   1028 	  WM_T_82545,		WMP_F_FIBER },
   1029 
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1031 	  "Intel i82546EB 1000BASE-X Ethernet",
   1032 	  WM_T_82546,		WMP_F_FIBER },
   1033 
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1035 	  "Intel i82546GB 1000BASE-T Ethernet",
   1036 	  WM_T_82546_3,		WMP_F_COPPER },
   1037 
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1039 	  "Intel i82546GB 1000BASE-X Ethernet",
   1040 	  WM_T_82546_3,		WMP_F_FIBER },
   1041 
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1043 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1044 	  WM_T_82546_3,		WMP_F_SERDES },
   1045 
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1047 	  "i82546GB quad-port Gigabit Ethernet",
   1048 	  WM_T_82546_3,		WMP_F_COPPER },
   1049 
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1051 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1052 	  WM_T_82546_3,		WMP_F_COPPER },
   1053 
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1055 	  "Intel PRO/1000MT (82546GB)",
   1056 	  WM_T_82546_3,		WMP_F_COPPER },
   1057 
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1059 	  "Intel i82541EI 1000BASE-T Ethernet",
   1060 	  WM_T_82541,		WMP_F_COPPER },
   1061 
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1063 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1064 	  WM_T_82541,		WMP_F_COPPER },
   1065 
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1067 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1068 	  WM_T_82541,		WMP_F_COPPER },
   1069 
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1071 	  "Intel i82541ER 1000BASE-T Ethernet",
   1072 	  WM_T_82541_2,		WMP_F_COPPER },
   1073 
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1075 	  "Intel i82541GI 1000BASE-T Ethernet",
   1076 	  WM_T_82541_2,		WMP_F_COPPER },
   1077 
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1079 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1080 	  WM_T_82541_2,		WMP_F_COPPER },
   1081 
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1083 	  "Intel i82541PI 1000BASE-T Ethernet",
   1084 	  WM_T_82541_2,		WMP_F_COPPER },
   1085 
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1087 	  "Intel i82547EI 1000BASE-T Ethernet",
   1088 	  WM_T_82547,		WMP_F_COPPER },
   1089 
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1091 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1092 	  WM_T_82547,		WMP_F_COPPER },
   1093 
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1095 	  "Intel i82547GI 1000BASE-T Ethernet",
   1096 	  WM_T_82547_2,		WMP_F_COPPER },
   1097 
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1099 	  "Intel PRO/1000 PT (82571EB)",
   1100 	  WM_T_82571,		WMP_F_COPPER },
   1101 
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1103 	  "Intel PRO/1000 PF (82571EB)",
   1104 	  WM_T_82571,		WMP_F_FIBER },
   1105 
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1107 	  "Intel PRO/1000 PB (82571EB)",
   1108 	  WM_T_82571,		WMP_F_SERDES },
   1109 
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1111 	  "Intel PRO/1000 QT (82571EB)",
   1112 	  WM_T_82571,		WMP_F_COPPER },
   1113 
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1115 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1116 	  WM_T_82571,		WMP_F_COPPER, },
   1117 
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1119 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1120 	  WM_T_82571,		WMP_F_COPPER, },
   1121 
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1123 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1124 	  WM_T_82571,		WMP_F_SERDES, },
   1125 
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1127 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1128 	  WM_T_82571,		WMP_F_SERDES, },
   1129 
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1131 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1132 	  WM_T_82571,		WMP_F_FIBER, },
   1133 
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1135 	  "Intel i82572EI 1000baseT Ethernet",
   1136 	  WM_T_82572,		WMP_F_COPPER },
   1137 
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1139 	  "Intel i82572EI 1000baseX Ethernet",
   1140 	  WM_T_82572,		WMP_F_FIBER },
   1141 
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1143 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1144 	  WM_T_82572,		WMP_F_SERDES },
   1145 
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1147 	  "Intel i82572EI 1000baseT Ethernet",
   1148 	  WM_T_82572,		WMP_F_COPPER },
   1149 
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1151 	  "Intel i82573E",
   1152 	  WM_T_82573,		WMP_F_COPPER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1155 	  "Intel i82573E IAMT",
   1156 	  WM_T_82573,		WMP_F_COPPER },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1159 	  "Intel i82573L Gigabit Ethernet",
   1160 	  WM_T_82573,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1163 	  "Intel i82574L",
   1164 	  WM_T_82574,		WMP_F_COPPER },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1167 	  "Intel i82574L",
   1168 	  WM_T_82574,		WMP_F_COPPER },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1171 	  "Intel i82583V",
   1172 	  WM_T_82583,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1175 	  "i80003 dual 1000baseT Ethernet",
   1176 	  WM_T_80003,		WMP_F_COPPER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1179 	  "i80003 dual 1000baseX Ethernet",
   1180 	  WM_T_80003,		WMP_F_COPPER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1183 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1184 	  WM_T_80003,		WMP_F_SERDES },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1187 	  "Intel i80003 1000baseT Ethernet",
   1188 	  WM_T_80003,		WMP_F_COPPER },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1191 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1192 	  WM_T_80003,		WMP_F_SERDES },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1195 	  "Intel i82801H (M_AMT) LAN Controller",
   1196 	  WM_T_ICH8,		WMP_F_COPPER },
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1198 	  "Intel i82801H (AMT) LAN Controller",
   1199 	  WM_T_ICH8,		WMP_F_COPPER },
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1201 	  "Intel i82801H LAN Controller",
   1202 	  WM_T_ICH8,		WMP_F_COPPER },
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1204 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1205 	  WM_T_ICH8,		WMP_F_COPPER },
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1207 	  "Intel i82801H (M) LAN Controller",
   1208 	  WM_T_ICH8,		WMP_F_COPPER },
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1210 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1211 	  WM_T_ICH8,		WMP_F_COPPER },
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1213 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1214 	  WM_T_ICH8,		WMP_F_COPPER },
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1216 	  "82567V-3 LAN Controller",
   1217 	  WM_T_ICH8,		WMP_F_COPPER },
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1219 	  "82801I (AMT) LAN Controller",
   1220 	  WM_T_ICH9,		WMP_F_COPPER },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1222 	  "82801I 10/100 LAN Controller",
   1223 	  WM_T_ICH9,		WMP_F_COPPER },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1225 	  "82801I (G) 10/100 LAN Controller",
   1226 	  WM_T_ICH9,		WMP_F_COPPER },
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1228 	  "82801I (GT) 10/100 LAN Controller",
   1229 	  WM_T_ICH9,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1231 	  "82801I (C) LAN Controller",
   1232 	  WM_T_ICH9,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1234 	  "82801I mobile LAN Controller",
   1235 	  WM_T_ICH9,		WMP_F_COPPER },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1237 	  "82801I mobile (V) LAN Controller",
   1238 	  WM_T_ICH9,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1240 	  "82801I mobile (AMT) LAN Controller",
   1241 	  WM_T_ICH9,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1243 	  "82567LM-4 LAN Controller",
   1244 	  WM_T_ICH9,		WMP_F_COPPER },
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1246 	  "82567LM-2 LAN Controller",
   1247 	  WM_T_ICH10,		WMP_F_COPPER },
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1249 	  "82567LF-2 LAN Controller",
   1250 	  WM_T_ICH10,		WMP_F_COPPER },
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1252 	  "82567LM-3 LAN Controller",
   1253 	  WM_T_ICH10,		WMP_F_COPPER },
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1255 	  "82567LF-3 LAN Controller",
   1256 	  WM_T_ICH10,		WMP_F_COPPER },
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1258 	  "82567V-2 LAN Controller",
   1259 	  WM_T_ICH10,		WMP_F_COPPER },
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1261 	  "82567V-3? LAN Controller",
   1262 	  WM_T_ICH10,		WMP_F_COPPER },
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1264 	  "HANKSVILLE LAN Controller",
   1265 	  WM_T_ICH10,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1267 	  "PCH LAN (82577LM) Controller",
   1268 	  WM_T_PCH,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1270 	  "PCH LAN (82577LC) Controller",
   1271 	  WM_T_PCH,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1273 	  "PCH LAN (82578DM) Controller",
   1274 	  WM_T_PCH,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1276 	  "PCH LAN (82578DC) Controller",
   1277 	  WM_T_PCH,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1279 	  "PCH2 LAN (82579LM) Controller",
   1280 	  WM_T_PCH2,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1282 	  "PCH2 LAN (82579V) Controller",
   1283 	  WM_T_PCH2,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1285 	  "82575EB dual-1000baseT Ethernet",
   1286 	  WM_T_82575,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1288 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1289 	  WM_T_82575,		WMP_F_SERDES },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1291 	  "82575GB quad-1000baseT Ethernet",
   1292 	  WM_T_82575,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1294 	  "82575GB quad-1000baseT Ethernet (PM)",
   1295 	  WM_T_82575,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1297 	  "82576 1000BaseT Ethernet",
   1298 	  WM_T_82576,		WMP_F_COPPER },
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1300 	  "82576 1000BaseX Ethernet",
   1301 	  WM_T_82576,		WMP_F_FIBER },
   1302 
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1304 	  "82576 gigabit Ethernet (SERDES)",
   1305 	  WM_T_82576,		WMP_F_SERDES },
   1306 
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1308 	  "82576 quad-1000BaseT Ethernet",
   1309 	  WM_T_82576,		WMP_F_COPPER },
   1310 
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1312 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1313 	  WM_T_82576,		WMP_F_COPPER },
   1314 
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1316 	  "82576 gigabit Ethernet",
   1317 	  WM_T_82576,		WMP_F_COPPER },
   1318 
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1320 	  "82576 gigabit Ethernet (SERDES)",
   1321 	  WM_T_82576,		WMP_F_SERDES },
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1323 	  "82576 quad-gigabit Ethernet (SERDES)",
   1324 	  WM_T_82576,		WMP_F_SERDES },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1327 	  "82580 1000BaseT Ethernet",
   1328 	  WM_T_82580,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1330 	  "82580 1000BaseX Ethernet",
   1331 	  WM_T_82580,		WMP_F_FIBER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1334 	  "82580 1000BaseT Ethernet (SERDES)",
   1335 	  WM_T_82580,		WMP_F_SERDES },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1338 	  "82580 gigabit Ethernet (SGMII)",
   1339 	  WM_T_82580,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1341 	  "82580 dual-1000BaseT Ethernet",
   1342 	  WM_T_82580,		WMP_F_COPPER },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1345 	  "82580 quad-1000BaseX Ethernet",
   1346 	  WM_T_82580,		WMP_F_FIBER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1349 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1350 	  WM_T_82580,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1353 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1354 	  WM_T_82580,		WMP_F_SERDES },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1357 	  "DH89XXCC 1000BASE-KX Ethernet",
   1358 	  WM_T_82580,		WMP_F_SERDES },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1361 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1362 	  WM_T_82580,		WMP_F_SERDES },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1365 	  "I350 Gigabit Network Connection",
   1366 	  WM_T_I350,		WMP_F_COPPER },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1369 	  "I350 Gigabit Fiber Network Connection",
   1370 	  WM_T_I350,		WMP_F_FIBER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1373 	  "I350 Gigabit Backplane Connection",
   1374 	  WM_T_I350,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1377 	  "I350 Quad Port Gigabit Ethernet",
   1378 	  WM_T_I350,		WMP_F_SERDES },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1381 	  "I350 Gigabit Connection",
   1382 	  WM_T_I350,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1385 	  "I354 Gigabit Ethernet (KX)",
   1386 	  WM_T_I354,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1389 	  "I354 Gigabit Ethernet (SGMII)",
   1390 	  WM_T_I354,		WMP_F_COPPER },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1393 	  "I354 Gigabit Ethernet (2.5G)",
   1394 	  WM_T_I354,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1397 	  "I210-T1 Ethernet Server Adapter",
   1398 	  WM_T_I210,		WMP_F_COPPER },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1401 	  "I210 Ethernet (Copper OEM)",
   1402 	  WM_T_I210,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1405 	  "I210 Ethernet (Copper IT)",
   1406 	  WM_T_I210,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1409 	  "I210 Ethernet (FLASH less)",
   1410 	  WM_T_I210,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1413 	  "I210 Gigabit Ethernet (Fiber)",
   1414 	  WM_T_I210,		WMP_F_FIBER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1417 	  "I210 Gigabit Ethernet (SERDES)",
   1418 	  WM_T_I210,		WMP_F_SERDES },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1421 	  "I210 Gigabit Ethernet (FLASH less)",
   1422 	  WM_T_I210,		WMP_F_SERDES },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1425 	  "I210 Gigabit Ethernet (SGMII)",
   1426 	  WM_T_I210,		WMP_F_COPPER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1429 	  "I211 Ethernet (COPPER)",
   1430 	  WM_T_I211,		WMP_F_COPPER },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1432 	  "I217 V Ethernet Connection",
   1433 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1435 	  "I217 LM Ethernet Connection",
   1436 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1438 	  "I218 V Ethernet Connection",
   1439 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1441 	  "I218 V Ethernet Connection",
   1442 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1444 	  "I218 V Ethernet Connection",
   1445 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1447 	  "I218 LM Ethernet Connection",
   1448 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1450 	  "I218 LM Ethernet Connection",
   1451 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1453 	  "I218 LM Ethernet Connection",
   1454 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1455 #if 0
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1457 	  "I219 V Ethernet Connection",
   1458 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1460 	  "I219 V Ethernet Connection",
   1461 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1463 	  "I219 V Ethernet Connection",
   1464 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1466 	  "I219 V Ethernet Connection",
   1467 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1469 	  "I219 LM Ethernet Connection",
   1470 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1472 	  "I219 LM Ethernet Connection",
   1473 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1475 	  "I219 LM Ethernet Connection",
   1476 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1478 	  "I219 LM Ethernet Connection",
   1479 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1481 	  "I219 LM Ethernet Connection",
   1482 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1483 #endif
   1484 	{ 0,			0,
   1485 	  NULL,
   1486 	  0,			0 },
   1487 };
   1488 
   1489 /*
   1490  * Register read/write functions.
   1491  * Other than CSR_{READ|WRITE}().
   1492  */
   1493 
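/*
 * I/O BAR accesses are indirect: the target register offset is first
 * written to the address window at offset 0 of the BAR, and the register
 * contents are then transferred through the data window at offset 4,
 * exactly as wm_io_read()/wm_io_write() below do.
 */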
   1494 #if 0 /* Not currently used */
   1495 static inline uint32_t
   1496 wm_io_read(struct wm_softc *sc, int reg)
   1497 {
   1498 
   1499 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1500 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1501 }
   1502 #endif
   1503 
   1504 static inline void
   1505 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1506 {
   1507 
   1508 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1509 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1510 }
   1511 
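/*
 * Write an 8-bit value to an indirectly addressed 8-bit controller
 * register (e.g. SCTL on the 82575 family): the data and the sub-register
 * offset are packed into a single 32-bit register write, and the READY
 * bit is then polled, in 5us steps for up to SCTL_CTL_POLL_TIMEOUT
 * iterations, to wait for the write to complete.
 */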
   1512 static inline void
   1513 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1514     uint32_t data)
   1515 {
   1516 	uint32_t regval;
   1517 	int i;
   1518 
   1519 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1520 
   1521 	CSR_WRITE(sc, reg, regval);
   1522 
   1523 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1524 		delay(5);
   1525 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1526 			break;
   1527 	}
   1528 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1529 		aprint_error("%s: WARNING:"
   1530 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1531 		    device_xname(sc->sc_dev), reg);
   1532 	}
   1533 }
   1534 
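/*
 * Store a bus address in the two little-endian 32-bit halves of a
 * wiseman_addr_t.  On platforms with a 32-bit bus_addr_t the high word
 * is always zero; the sizeof() test is resolved at compile time.
 */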
   1535 static inline void
   1536 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1537 {
   1538 	wa->wa_low = htole32(v & 0xffffffffU);
   1539 	if (sizeof(bus_addr_t) == 8)
   1540 		wa->wa_high = htole32((uint64_t) v >> 32);
   1541 	else
   1542 		wa->wa_high = 0;
   1543 }
   1544 
   1545 /*
   1546  * Descriptor sync/init functions.
   1547  */
   1548 static inline void
   1549 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1550 {
   1551 	struct wm_softc *sc = txq->txq_sc;
   1552 
   1553 	/* If it will wrap around, sync to the end of the ring. */
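	/*
	 * e.g. with WM_NTXDESC(txq) == 256, start == 250 and num == 10,
	 * sync descriptors 250..255 here; the second bus_dmamap_sync()
	 * below then covers 0..3.
	 */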
   1554 	if ((start + num) > WM_NTXDESC(txq)) {
   1555 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1556 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1557 		    (WM_NTXDESC(txq) - start), ops);
   1558 		num -= (WM_NTXDESC(txq) - start);
   1559 		start = 0;
   1560 	}
   1561 
   1562 	/* Now sync whatever is left. */
   1563 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1564 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1565 }
   1566 
   1567 static inline void
   1568 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1569 {
   1570 	struct wm_softc *sc = rxq->rxq_sc;
   1571 
   1572 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1573 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1574 }
   1575 
   1576 static inline void
   1577 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1578 {
   1579 	struct wm_softc *sc = rxq->rxq_sc;
   1580 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1581 	struct mbuf *m = rxs->rxs_mbuf;
   1582 
   1583 	/*
   1584 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1585 	 * so that the payload after the Ethernet header is aligned
   1586 	 * to a 4-byte boundary.
    1587 	 *
   1588 	 * XXX BRAINDAMAGE ALERT!
   1589 	 * The stupid chip uses the same size for every buffer, which
   1590 	 * is set in the Receive Control register.  We are using the 2K
   1591 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1592 	 * reason, we can't "scoot" packets longer than the standard
   1593 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1594 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1595 	 * the upper layer copy the headers.
   1596 	 */
   1597 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1598 
   1599 	if (sc->sc_type == WM_T_82574) {
   1600 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1601 		rxd->erx_data.erxd_addr =
   1602 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1603 		rxd->erx_data.erxd_dd = 0;
   1604 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1605 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1606 
   1607 		rxd->nqrx_data.nrxd_paddr =
   1608 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1609 		/* Currently, split header is not supported. */
   1610 		rxd->nqrx_data.nrxd_haddr = 0;
   1611 	} else {
   1612 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1613 
   1614 		wm_set_dma_addr(&rxd->wrx_addr,
   1615 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1616 		rxd->wrx_len = 0;
   1617 		rxd->wrx_cksum = 0;
   1618 		rxd->wrx_status = 0;
   1619 		rxd->wrx_errors = 0;
   1620 		rxd->wrx_special = 0;
   1621 	}
   1622 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1623 
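	/* Pass ownership of this descriptor to the chip by moving the RX tail. */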
   1624 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1625 }
   1626 
   1627 /*
   1628  * Device driver interface functions and commonly used functions.
   1629  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1630  */
   1631 
   1632 /* Lookup supported device table */
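/*
 * A linear scan is sufficient here: the table ends with an all-zero
 * sentinel entry whose NULL wmp_name terminates the loop.
 */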
   1633 static const struct wm_product *
   1634 wm_lookup(const struct pci_attach_args *pa)
   1635 {
   1636 	const struct wm_product *wmp;
   1637 
   1638 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1639 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1640 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1641 			return wmp;
   1642 	}
   1643 	return NULL;
   1644 }
   1645 
   1646 /* The match function (ca_match) */
   1647 static int
   1648 wm_match(device_t parent, cfdata_t cf, void *aux)
   1649 {
   1650 	struct pci_attach_args *pa = aux;
   1651 
   1652 	if (wm_lookup(pa) != NULL)
   1653 		return 1;
   1654 
   1655 	return 0;
   1656 }
   1657 
   1658 /* The attach function (ca_attach) */
   1659 static void
   1660 wm_attach(device_t parent, device_t self, void *aux)
   1661 {
   1662 	struct wm_softc *sc = device_private(self);
   1663 	struct pci_attach_args *pa = aux;
   1664 	prop_dictionary_t dict;
   1665 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1666 	pci_chipset_tag_t pc = pa->pa_pc;
   1667 	int counts[PCI_INTR_TYPE_SIZE];
   1668 	pci_intr_type_t max_type;
   1669 	const char *eetype, *xname;
   1670 	bus_space_tag_t memt;
   1671 	bus_space_handle_t memh;
   1672 	bus_size_t memsize;
   1673 	int memh_valid;
   1674 	int i, error;
   1675 	const struct wm_product *wmp;
   1676 	prop_data_t ea;
   1677 	prop_number_t pn;
   1678 	uint8_t enaddr[ETHER_ADDR_LEN];
   1679 	char buf[256];
   1680 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1681 	pcireg_t preg, memtype;
   1682 	uint16_t eeprom_data, apme_mask;
   1683 	bool force_clear_smbi;
   1684 	uint32_t link_mode;
   1685 	uint32_t reg;
   1686 
   1687 	sc->sc_dev = self;
   1688 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1689 	sc->sc_core_stopping = false;
   1690 
   1691 	wmp = wm_lookup(pa);
   1692 #ifdef DIAGNOSTIC
   1693 	if (wmp == NULL) {
   1694 		printf("\n");
   1695 		panic("wm_attach: impossible");
   1696 	}
   1697 #endif
   1698 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1699 
   1700 	sc->sc_pc = pa->pa_pc;
   1701 	sc->sc_pcitag = pa->pa_tag;
   1702 
   1703 	if (pci_dma64_available(pa))
   1704 		sc->sc_dmat = pa->pa_dmat64;
   1705 	else
   1706 		sc->sc_dmat = pa->pa_dmat;
   1707 
   1708 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1709 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1710 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1711 
   1712 	sc->sc_type = wmp->wmp_type;
   1713 
   1714 	/* Set default function pointers */
   1715 	sc->phy.acquire = wm_get_null;
   1716 	sc->phy.release = wm_put_null;
   1717 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1718 
   1719 	if (sc->sc_type < WM_T_82543) {
   1720 		if (sc->sc_rev < 2) {
   1721 			aprint_error_dev(sc->sc_dev,
   1722 			    "i82542 must be at least rev. 2\n");
   1723 			return;
   1724 		}
   1725 		if (sc->sc_rev < 3)
   1726 			sc->sc_type = WM_T_82542_2_0;
   1727 	}
   1728 
   1729 	/*
   1730 	 * Disable MSI for Errata:
   1731 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1732 	 *
   1733 	 *  82544: Errata 25
   1734 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1735 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1736 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1737 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1738 	 *
   1739 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1740 	 *
   1741 	 *  82571 & 82572: Errata 63
   1742 	 */
   1743 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1744 	    || (sc->sc_type == WM_T_82572))
   1745 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1746 
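	/*
	 * WM_F_NEWQUEUE selects the newer queue handling on 82575 and
	 * later: the "nq" descriptor layout (see wm_init_rxdesc()) and
	 * the wm_nq_start()/wm_nq_transmit() paths set up further below.
	 */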
   1747 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1748 	    || (sc->sc_type == WM_T_82580)
   1749 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1750 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1751 		sc->sc_flags |= WM_F_NEWQUEUE;
   1752 
   1753 	/* Set device properties (mactype) */
   1754 	dict = device_properties(sc->sc_dev);
   1755 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1756 
   1757 	/*
    1758 	 * Map the device.  All devices support memory-mapped access,
   1759 	 * and it is really required for normal operation.
   1760 	 */
   1761 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1762 	switch (memtype) {
   1763 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1764 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1765 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1766 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1767 		break;
   1768 	default:
   1769 		memh_valid = 0;
   1770 		break;
   1771 	}
   1772 
   1773 	if (memh_valid) {
   1774 		sc->sc_st = memt;
   1775 		sc->sc_sh = memh;
   1776 		sc->sc_ss = memsize;
   1777 	} else {
   1778 		aprint_error_dev(sc->sc_dev,
   1779 		    "unable to map device registers\n");
   1780 		return;
   1781 	}
   1782 
   1783 	/*
   1784 	 * In addition, i82544 and later support I/O mapped indirect
   1785 	 * register access.  It is not desirable (nor supported in
   1786 	 * this driver) to use it for normal operation, though it is
   1787 	 * required to work around bugs in some chip versions.
   1788 	 */
   1789 	if (sc->sc_type >= WM_T_82544) {
   1790 		/* First we have to find the I/O BAR. */
   1791 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1792 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1793 			if (memtype == PCI_MAPREG_TYPE_IO)
   1794 				break;
   1795 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1796 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1797 				i += 4;	/* skip high bits, too */
   1798 		}
   1799 		if (i < PCI_MAPREG_END) {
   1800 			/*
    1801 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1802 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR.
    1803 			 * That's no problem, because the newer chips don't
    1804 			 * have the bug that requires it.
    1805 			 *
    1806 			 * The i8254x apparently doesn't respond when the
    1807 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1808 			 * been configured.
   1809 			 */
   1810 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1811 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1812 				aprint_error_dev(sc->sc_dev,
   1813 				    "WARNING: I/O BAR at zero.\n");
   1814 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1815 					0, &sc->sc_iot, &sc->sc_ioh,
   1816 					NULL, &sc->sc_ios) == 0) {
   1817 				sc->sc_flags |= WM_F_IOH_VALID;
   1818 			} else {
   1819 				aprint_error_dev(sc->sc_dev,
   1820 				    "WARNING: unable to map I/O space\n");
   1821 			}
   1822 		}
   1823 
   1824 	}
   1825 
   1826 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1827 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1828 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1829 	if (sc->sc_type < WM_T_82542_2_1)
   1830 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1831 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1832 
   1833 	/* power up chip */
   1834 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1835 	    NULL)) && error != EOPNOTSUPP) {
   1836 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1837 		return;
   1838 	}
   1839 
   1840 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1841 
   1842 	/* Allocation settings */
   1843 	max_type = PCI_INTR_TYPE_MSIX;
   1844 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1845 	counts[PCI_INTR_TYPE_MSI] = 1;
   1846 	counts[PCI_INTR_TYPE_INTX] = 1;
   1847 	/* overridden by disable flags */
   1848 	if (wm_disable_msi != 0) {
   1849 		counts[PCI_INTR_TYPE_MSI] = 0;
   1850 		if (wm_disable_msix != 0) {
   1851 			max_type = PCI_INTR_TYPE_INTX;
   1852 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1853 		}
   1854 	} else if (wm_disable_msix != 0) {
   1855 		max_type = PCI_INTR_TYPE_MSI;
   1856 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1857 	}
   1858 
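	/*
	 * Interrupt allocation falls back in steps: MSI-X, then MSI,
	 * then INTx.  When MSI-X or MSI setup fails, the vectors just
	 * allocated are released and we retry from here with a weaker
	 * max_type.
	 */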
   1859 alloc_retry:
   1860 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1861 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1862 		return;
   1863 	}
   1864 
   1865 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1866 		error = wm_setup_msix(sc);
   1867 		if (error) {
   1868 			pci_intr_release(pc, sc->sc_intrs,
   1869 			    counts[PCI_INTR_TYPE_MSIX]);
   1870 
   1871 			/* Setup for MSI: Disable MSI-X */
   1872 			max_type = PCI_INTR_TYPE_MSI;
   1873 			counts[PCI_INTR_TYPE_MSI] = 1;
   1874 			counts[PCI_INTR_TYPE_INTX] = 1;
   1875 			goto alloc_retry;
   1876 		}
    1877 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1878 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1879 		error = wm_setup_legacy(sc);
   1880 		if (error) {
   1881 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1882 			    counts[PCI_INTR_TYPE_MSI]);
   1883 
   1884 			/* The next try is for INTx: Disable MSI */
   1885 			max_type = PCI_INTR_TYPE_INTX;
   1886 			counts[PCI_INTR_TYPE_INTX] = 1;
   1887 			goto alloc_retry;
   1888 		}
   1889 	} else {
   1890 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1891 		error = wm_setup_legacy(sc);
   1892 		if (error) {
   1893 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1894 			    counts[PCI_INTR_TYPE_INTX]);
   1895 			return;
   1896 		}
   1897 	}
   1898 
   1899 	/*
   1900 	 * Check the function ID (unit number of the chip).
   1901 	 */
   1902 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1903 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1904 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1905 	    || (sc->sc_type == WM_T_82580)
   1906 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1907 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1908 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1909 	else
   1910 		sc->sc_funcid = 0;
   1911 
   1912 	/*
   1913 	 * Determine a few things about the bus we're connected to.
   1914 	 */
   1915 	if (sc->sc_type < WM_T_82543) {
   1916 		/* We don't really know the bus characteristics here. */
   1917 		sc->sc_bus_speed = 33;
   1918 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1919 		/*
   1920 		 * CSA (Communication Streaming Architecture) is about as fast
    1921 		 * as a 32-bit 66MHz PCI bus.
   1922 		 */
   1923 		sc->sc_flags |= WM_F_CSA;
   1924 		sc->sc_bus_speed = 66;
   1925 		aprint_verbose_dev(sc->sc_dev,
   1926 		    "Communication Streaming Architecture\n");
   1927 		if (sc->sc_type == WM_T_82547) {
   1928 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1929 			callout_setfunc(&sc->sc_txfifo_ch,
   1930 					wm_82547_txfifo_stall, sc);
   1931 			aprint_verbose_dev(sc->sc_dev,
   1932 			    "using 82547 Tx FIFO stall work-around\n");
   1933 		}
   1934 	} else if (sc->sc_type >= WM_T_82571) {
   1935 		sc->sc_flags |= WM_F_PCIE;
   1936 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1937 		    && (sc->sc_type != WM_T_ICH10)
   1938 		    && (sc->sc_type != WM_T_PCH)
   1939 		    && (sc->sc_type != WM_T_PCH2)
   1940 		    && (sc->sc_type != WM_T_PCH_LPT)
   1941 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1942 			/* ICH* and PCH* have no PCIe capability registers */
   1943 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1944 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1945 				NULL) == 0)
   1946 				aprint_error_dev(sc->sc_dev,
   1947 				    "unable to find PCIe capability\n");
   1948 		}
   1949 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1950 	} else {
   1951 		reg = CSR_READ(sc, WMREG_STATUS);
   1952 		if (reg & STATUS_BUS64)
   1953 			sc->sc_flags |= WM_F_BUS64;
   1954 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1955 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1956 
   1957 			sc->sc_flags |= WM_F_PCIX;
   1958 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1959 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1960 				aprint_error_dev(sc->sc_dev,
   1961 				    "unable to find PCIX capability\n");
   1962 			else if (sc->sc_type != WM_T_82545_3 &&
   1963 				 sc->sc_type != WM_T_82546_3) {
   1964 				/*
   1965 				 * Work around a problem caused by the BIOS
   1966 				 * setting the max memory read byte count
   1967 				 * incorrectly.
   1968 				 */
   1969 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1970 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1971 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1972 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1973 
   1974 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1975 				    PCIX_CMD_BYTECNT_SHIFT;
   1976 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1977 				    PCIX_STATUS_MAXB_SHIFT;
   1978 				if (bytecnt > maxb) {
   1979 					aprint_verbose_dev(sc->sc_dev,
   1980 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1981 					    512 << bytecnt, 512 << maxb);
   1982 					pcix_cmd = (pcix_cmd &
   1983 					    ~PCIX_CMD_BYTECNT_MASK) |
   1984 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1985 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1986 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1987 					    pcix_cmd);
   1988 				}
   1989 			}
   1990 		}
   1991 		/*
   1992 		 * The quad port adapter is special; it has a PCIX-PCIX
   1993 		 * bridge on the board, and can run the secondary bus at
   1994 		 * a higher speed.
   1995 		 */
   1996 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1997 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1998 								      : 66;
   1999 		} else if (sc->sc_flags & WM_F_PCIX) {
   2000 			switch (reg & STATUS_PCIXSPD_MASK) {
   2001 			case STATUS_PCIXSPD_50_66:
   2002 				sc->sc_bus_speed = 66;
   2003 				break;
   2004 			case STATUS_PCIXSPD_66_100:
   2005 				sc->sc_bus_speed = 100;
   2006 				break;
   2007 			case STATUS_PCIXSPD_100_133:
   2008 				sc->sc_bus_speed = 133;
   2009 				break;
   2010 			default:
   2011 				aprint_error_dev(sc->sc_dev,
   2012 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2013 				    reg & STATUS_PCIXSPD_MASK);
   2014 				sc->sc_bus_speed = 66;
   2015 				break;
   2016 			}
   2017 		} else
   2018 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2019 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2020 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2021 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2022 	}
   2023 
   2024 	/* clear interesting stat counters */
   2025 	CSR_READ(sc, WMREG_COLC);
   2026 	CSR_READ(sc, WMREG_RXERRC);
   2027 
   2028 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2029 	    || (sc->sc_type >= WM_T_ICH8))
   2030 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2031 	if (sc->sc_type >= WM_T_ICH8)
   2032 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2033 
   2034 	/* Set PHY, NVM mutex related stuff */
   2035 	switch (sc->sc_type) {
   2036 	case WM_T_82542_2_0:
   2037 	case WM_T_82542_2_1:
   2038 	case WM_T_82543:
   2039 	case WM_T_82544:
   2040 		/* Microwire */
   2041 		sc->sc_nvm_wordsize = 64;
   2042 		sc->sc_nvm_addrbits = 6;
   2043 		break;
   2044 	case WM_T_82540:
   2045 	case WM_T_82545:
   2046 	case WM_T_82545_3:
   2047 	case WM_T_82546:
   2048 	case WM_T_82546_3:
   2049 		/* Microwire */
   2050 		reg = CSR_READ(sc, WMREG_EECD);
   2051 		if (reg & EECD_EE_SIZE) {
   2052 			sc->sc_nvm_wordsize = 256;
   2053 			sc->sc_nvm_addrbits = 8;
   2054 		} else {
   2055 			sc->sc_nvm_wordsize = 64;
   2056 			sc->sc_nvm_addrbits = 6;
   2057 		}
   2058 		sc->sc_flags |= WM_F_LOCK_EECD;
   2059 		break;
   2060 	case WM_T_82541:
   2061 	case WM_T_82541_2:
   2062 	case WM_T_82547:
   2063 	case WM_T_82547_2:
   2064 		sc->sc_flags |= WM_F_LOCK_EECD;
   2065 		reg = CSR_READ(sc, WMREG_EECD);
   2066 		if (reg & EECD_EE_TYPE) {
   2067 			/* SPI */
   2068 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2069 			wm_nvm_set_addrbits_size_eecd(sc);
   2070 		} else {
   2071 			/* Microwire */
   2072 			if ((reg & EECD_EE_ABITS) != 0) {
   2073 				sc->sc_nvm_wordsize = 256;
   2074 				sc->sc_nvm_addrbits = 8;
   2075 			} else {
   2076 				sc->sc_nvm_wordsize = 64;
   2077 				sc->sc_nvm_addrbits = 6;
   2078 			}
   2079 		}
   2080 		break;
   2081 	case WM_T_82571:
   2082 	case WM_T_82572:
   2083 		/* SPI */
   2084 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2085 		wm_nvm_set_addrbits_size_eecd(sc);
   2086 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2087 		sc->phy.acquire = wm_get_swsm_semaphore;
   2088 		sc->phy.release = wm_put_swsm_semaphore;
   2089 		break;
   2090 	case WM_T_82573:
   2091 	case WM_T_82574:
   2092 	case WM_T_82583:
   2093 		if (sc->sc_type == WM_T_82573) {
   2094 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2095 			sc->phy.acquire = wm_get_swsm_semaphore;
   2096 			sc->phy.release = wm_put_swsm_semaphore;
   2097 		} else {
   2098 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2099 			/* Both PHY and NVM use the same semaphore. */
   2100 			sc->phy.acquire
   2101 			    = wm_get_swfwhw_semaphore;
   2102 			sc->phy.release
   2103 			    = wm_put_swfwhw_semaphore;
   2104 		}
   2105 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2106 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2107 			sc->sc_nvm_wordsize = 2048;
   2108 		} else {
   2109 			/* SPI */
   2110 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2111 			wm_nvm_set_addrbits_size_eecd(sc);
   2112 		}
   2113 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2114 		break;
   2115 	case WM_T_82575:
   2116 	case WM_T_82576:
   2117 	case WM_T_82580:
   2118 	case WM_T_I350:
   2119 	case WM_T_I354:
   2120 	case WM_T_80003:
   2121 		/* SPI */
   2122 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2123 		wm_nvm_set_addrbits_size_eecd(sc);
   2124 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2125 		    | WM_F_LOCK_SWSM;
   2126 		sc->phy.acquire = wm_get_phy_82575;
   2127 		sc->phy.release = wm_put_phy_82575;
   2128 		break;
   2129 	case WM_T_ICH8:
   2130 	case WM_T_ICH9:
   2131 	case WM_T_ICH10:
   2132 	case WM_T_PCH:
   2133 	case WM_T_PCH2:
   2134 	case WM_T_PCH_LPT:
   2135 		/* FLASH */
   2136 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2137 		sc->sc_nvm_wordsize = 2048;
   2138 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2139 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2140 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2141 			aprint_error_dev(sc->sc_dev,
   2142 			    "can't map FLASH registers\n");
   2143 			goto out;
   2144 		}
   2145 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
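		/*
		 * GFPREG holds the first sector of the NVM flash region
		 * in its low half and the last sector in its high half.
		 * Convert that sector range to bytes, then to 16-bit
		 * words split across the two flash banks.
		 */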
   2146 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2147 		    ICH_FLASH_SECTOR_SIZE;
   2148 		sc->sc_ich8_flash_bank_size =
   2149 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2150 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2151 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2152 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2153 		sc->sc_flashreg_offset = 0;
   2154 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2155 		sc->phy.release = wm_put_swflag_ich8lan;
   2156 		break;
   2157 	case WM_T_PCH_SPT:
   2158 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2159 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2160 		sc->sc_flasht = sc->sc_st;
   2161 		sc->sc_flashh = sc->sc_sh;
   2162 		sc->sc_ich8_flash_base = 0;
   2163 		sc->sc_nvm_wordsize =
   2164 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2165 			* NVM_SIZE_MULTIPLIER;
   2166 		/* It is size in bytes, we want words */
   2167 		sc->sc_nvm_wordsize /= 2;
   2168 		/* assume 2 banks */
   2169 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2170 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2171 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2172 		sc->phy.release = wm_put_swflag_ich8lan;
   2173 		break;
   2174 	case WM_T_I210:
   2175 	case WM_T_I211:
   2176 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2177 			wm_nvm_set_addrbits_size_eecd(sc);
   2178 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2179 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2180 		} else {
   2181 			sc->sc_nvm_wordsize = INVM_SIZE;
   2182 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2183 		}
   2184 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2185 		sc->phy.acquire = wm_get_phy_82575;
   2186 		sc->phy.release = wm_put_phy_82575;
   2187 		break;
   2188 	default:
   2189 		break;
   2190 	}
   2191 
   2192 	/* Reset the chip to a known state. */
   2193 	wm_reset(sc);
   2194 
   2195 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2196 	switch (sc->sc_type) {
   2197 	case WM_T_82571:
   2198 	case WM_T_82572:
   2199 		reg = CSR_READ(sc, WMREG_SWSM2);
   2200 		if ((reg & SWSM2_LOCK) == 0) {
   2201 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2202 			force_clear_smbi = true;
   2203 		} else
   2204 			force_clear_smbi = false;
   2205 		break;
   2206 	case WM_T_82573:
   2207 	case WM_T_82574:
   2208 	case WM_T_82583:
   2209 		force_clear_smbi = true;
   2210 		break;
   2211 	default:
   2212 		force_clear_smbi = false;
   2213 		break;
   2214 	}
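	/*
	 * If the host semaphore was left held (SWSM_SMBI set), firmware
	 * such as an old boot agent presumably failed to release it;
	 * clear it so the first NVM/PHY access can take the semaphore.
	 */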
   2215 	if (force_clear_smbi) {
   2216 		reg = CSR_READ(sc, WMREG_SWSM);
   2217 		if ((reg & SWSM_SMBI) != 0)
   2218 			aprint_error_dev(sc->sc_dev,
   2219 			    "Please update the Bootagent\n");
   2220 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2221 	}
   2222 
   2223 	/*
    2224 	 * Defer printing the EEPROM type until after verifying the checksum.
   2225 	 * This allows the EEPROM type to be printed correctly in the case
   2226 	 * that no EEPROM is attached.
   2227 	 */
   2228 	/*
   2229 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2230 	 * this for later, so we can fail future reads from the EEPROM.
   2231 	 */
   2232 	if (wm_nvm_validate_checksum(sc)) {
   2233 		/*
    2234 		 * Check a second time; some PCIe parts fail the first
    2235 		 * check because the link is in a sleep state.
   2236 		 */
   2237 		if (wm_nvm_validate_checksum(sc))
   2238 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2239 	}
   2240 
   2241 	/* Set device properties (macflags) */
   2242 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2243 
   2244 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2245 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2246 	else {
   2247 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2248 		    sc->sc_nvm_wordsize);
   2249 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2250 			aprint_verbose("iNVM");
   2251 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2252 			aprint_verbose("FLASH(HW)");
   2253 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2254 			aprint_verbose("FLASH");
   2255 		else {
   2256 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2257 				eetype = "SPI";
   2258 			else
   2259 				eetype = "MicroWire";
   2260 			aprint_verbose("(%d address bits) %s EEPROM",
   2261 			    sc->sc_nvm_addrbits, eetype);
   2262 		}
   2263 	}
   2264 	wm_nvm_version(sc);
   2265 	aprint_verbose("\n");
   2266 
   2267 	/* Check for I21[01] PLL workaround */
   2268 	if (sc->sc_type == WM_T_I210)
   2269 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2270 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2271 		/* NVM image release 3.25 has a workaround */
   2272 		if ((sc->sc_nvm_ver_major < 3)
   2273 		    || ((sc->sc_nvm_ver_major == 3)
   2274 			&& (sc->sc_nvm_ver_minor < 25))) {
   2275 			aprint_verbose_dev(sc->sc_dev,
   2276 			    "ROM image version %d.%d is older than 3.25\n",
   2277 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2278 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2279 		}
   2280 	}
   2281 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2282 		wm_pll_workaround_i210(sc);
   2283 
   2284 	wm_get_wakeup(sc);
   2285 
   2286 	/* Non-AMT based hardware can now take control from firmware */
   2287 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2288 		wm_get_hw_control(sc);
   2289 
   2290 	/*
    2291 	 * Read the Ethernet address from device properties if present,
    2292 	 * otherwise from the EEPROM.
   2293 	 */
   2294 	ea = prop_dictionary_get(dict, "mac-address");
   2295 	if (ea != NULL) {
   2296 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2297 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2298 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2299 	} else {
   2300 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2301 			aprint_error_dev(sc->sc_dev,
   2302 			    "unable to read Ethernet address\n");
   2303 			goto out;
   2304 		}
   2305 	}
   2306 
   2307 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2308 	    ether_sprintf(enaddr));
   2309 
   2310 	/*
   2311 	 * Read the config info from the EEPROM, and set up various
   2312 	 * bits in the control registers based on their contents.
   2313 	 */
   2314 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2315 	if (pn != NULL) {
   2316 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2317 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2318 	} else {
   2319 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2320 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2321 			goto out;
   2322 		}
   2323 	}
   2324 
   2325 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2326 	if (pn != NULL) {
   2327 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2328 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2329 	} else {
   2330 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2331 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2332 			goto out;
   2333 		}
   2334 	}
   2335 
   2336 	/* check for WM_F_WOL */
   2337 	switch (sc->sc_type) {
   2338 	case WM_T_82542_2_0:
   2339 	case WM_T_82542_2_1:
   2340 	case WM_T_82543:
   2341 		/* dummy? */
   2342 		eeprom_data = 0;
   2343 		apme_mask = NVM_CFG3_APME;
   2344 		break;
   2345 	case WM_T_82544:
   2346 		apme_mask = NVM_CFG2_82544_APM_EN;
   2347 		eeprom_data = cfg2;
   2348 		break;
   2349 	case WM_T_82546:
   2350 	case WM_T_82546_3:
   2351 	case WM_T_82571:
   2352 	case WM_T_82572:
   2353 	case WM_T_82573:
   2354 	case WM_T_82574:
   2355 	case WM_T_82583:
   2356 	case WM_T_80003:
   2357 	default:
   2358 		apme_mask = NVM_CFG3_APME;
   2359 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2360 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2361 		break;
   2362 	case WM_T_82575:
   2363 	case WM_T_82576:
   2364 	case WM_T_82580:
   2365 	case WM_T_I350:
   2366 	case WM_T_I354: /* XXX ok? */
   2367 	case WM_T_ICH8:
   2368 	case WM_T_ICH9:
   2369 	case WM_T_ICH10:
   2370 	case WM_T_PCH:
   2371 	case WM_T_PCH2:
   2372 	case WM_T_PCH_LPT:
   2373 	case WM_T_PCH_SPT:
   2374 		/* XXX The funcid should be checked on some devices */
   2375 		apme_mask = WUC_APME;
   2376 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2377 		break;
   2378 	}
   2379 
   2380 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2381 	if ((eeprom_data & apme_mask) != 0)
   2382 		sc->sc_flags |= WM_F_WOL;
   2383 
   2384 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2385 		/* Check NVM for autonegotiation */
   2386 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2387 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2388 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2389 		}
   2390 	}
   2391 
   2392 	/*
    2393 	 * XXX Some multi-port cards need special handling to
    2394 	 * disable a particular port.
   2395 	 */
   2396 
   2397 	if (sc->sc_type >= WM_T_82544) {
   2398 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2399 		if (pn != NULL) {
   2400 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2401 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2402 		} else {
   2403 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2404 				aprint_error_dev(sc->sc_dev,
   2405 				    "unable to read SWDPIN\n");
   2406 				goto out;
   2407 			}
   2408 		}
   2409 	}
   2410 
   2411 	if (cfg1 & NVM_CFG1_ILOS)
   2412 		sc->sc_ctrl |= CTRL_ILOS;
   2413 
   2414 	/*
   2415 	 * XXX
    2416 	 * This code isn't correct, because pins 2 and 3 are located
    2417 	 * at different positions on newer chips. Check all datasheets.
    2418 	 *
    2419 	 * Until this is resolved, restrict it to chips up to the 82580.
   2420 	 */
   2421 	if (sc->sc_type <= WM_T_82580) {
   2422 		if (sc->sc_type >= WM_T_82544) {
   2423 			sc->sc_ctrl |=
   2424 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2425 			    CTRL_SWDPIO_SHIFT;
   2426 			sc->sc_ctrl |=
   2427 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2428 			    CTRL_SWDPINS_SHIFT;
   2429 		} else {
   2430 			sc->sc_ctrl |=
   2431 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2432 			    CTRL_SWDPIO_SHIFT;
   2433 		}
   2434 	}
   2435 
   2436 	/* XXX For other than 82580? */
   2437 	if (sc->sc_type == WM_T_82580) {
   2438 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2439 		if (nvmword & __BIT(13))
   2440 			sc->sc_ctrl |= CTRL_ILOS;
   2441 	}
   2442 
   2443 #if 0
   2444 	if (sc->sc_type >= WM_T_82544) {
   2445 		if (cfg1 & NVM_CFG1_IPS0)
   2446 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2447 		if (cfg1 & NVM_CFG1_IPS1)
   2448 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2449 		sc->sc_ctrl_ext |=
   2450 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2451 		    CTRL_EXT_SWDPIO_SHIFT;
   2452 		sc->sc_ctrl_ext |=
   2453 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2454 		    CTRL_EXT_SWDPINS_SHIFT;
   2455 	} else {
   2456 		sc->sc_ctrl_ext |=
   2457 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2458 		    CTRL_EXT_SWDPIO_SHIFT;
   2459 	}
   2460 #endif
   2461 
   2462 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2463 #if 0
   2464 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2465 #endif
   2466 
   2467 	if (sc->sc_type == WM_T_PCH) {
   2468 		uint16_t val;
   2469 
   2470 		/* Save the NVM K1 bit setting */
   2471 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2472 
   2473 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2474 			sc->sc_nvm_k1_enabled = 1;
   2475 		else
   2476 			sc->sc_nvm_k1_enabled = 0;
   2477 	}
   2478 
   2479 	/*
   2480 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
   2481 	 * media structures accordingly.
   2482 	 */
   2483 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2484 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2485 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2486 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2487 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2488 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2489 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2490 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2491 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2492 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2493 	    || (sc->sc_type == WM_T_I211)) {
   2494 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2495 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2496 		switch (link_mode) {
   2497 		case CTRL_EXT_LINK_MODE_1000KX:
   2498 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2499 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2500 			break;
   2501 		case CTRL_EXT_LINK_MODE_SGMII:
   2502 			if (wm_sgmii_uses_mdio(sc)) {
   2503 				aprint_verbose_dev(sc->sc_dev,
   2504 				    "SGMII(MDIO)\n");
   2505 				sc->sc_flags |= WM_F_SGMII;
   2506 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2507 				break;
   2508 			}
   2509 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2510 			/*FALLTHROUGH*/
   2511 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2512 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2513 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2514 				if (link_mode
   2515 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2516 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2517 					sc->sc_flags |= WM_F_SGMII;
   2518 				} else {
   2519 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2520 					aprint_verbose_dev(sc->sc_dev,
   2521 					    "SERDES\n");
   2522 				}
   2523 				break;
   2524 			}
   2525 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2526 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2527 
   2528 			/* Change current link mode setting */
   2529 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2530 			switch (sc->sc_mediatype) {
   2531 			case WM_MEDIATYPE_COPPER:
   2532 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2533 				break;
   2534 			case WM_MEDIATYPE_SERDES:
   2535 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2536 				break;
   2537 			default:
   2538 				break;
   2539 			}
   2540 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2541 			break;
   2542 		case CTRL_EXT_LINK_MODE_GMII:
   2543 		default:
   2544 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2545 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2546 			break;
   2547 		}
   2548 
    2550 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2551 			reg |= CTRL_EXT_I2C_ENA;
    2552 		else
    2553 			reg &= ~CTRL_EXT_I2C_ENA;
   2554 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2555 
   2556 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2557 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2558 		else
   2559 			wm_tbi_mediainit(sc);
   2560 	} else if (sc->sc_type < WM_T_82543 ||
   2561 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2562 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2563 			aprint_error_dev(sc->sc_dev,
   2564 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2565 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2566 		}
   2567 		wm_tbi_mediainit(sc);
   2568 	} else {
   2569 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2570 			aprint_error_dev(sc->sc_dev,
   2571 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2572 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2573 		}
   2574 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2575 	}
   2576 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2577 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2578 
   2579 	ifp = &sc->sc_ethercom.ec_if;
   2580 	xname = device_xname(sc->sc_dev);
   2581 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2582 	ifp->if_softc = sc;
   2583 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2584 #ifdef WM_MPSAFE
   2585 	ifp->if_extflags = IFEF_START_MPSAFE;
   2586 #endif
   2587 	ifp->if_ioctl = wm_ioctl;
   2588 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2589 		ifp->if_start = wm_nq_start;
   2590 		/*
   2591 		 * When the number of CPUs is one and the controller can use
    2592 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2593 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2594 		 * the other for link status changes.
   2595 		 * In this situation, wm_nq_transmit() is disadvantageous
   2596 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2597 		 */
   2598 		if (wm_is_using_multiqueue(sc))
   2599 			ifp->if_transmit = wm_nq_transmit;
   2600 	} else {
   2601 		ifp->if_start = wm_start;
   2602 		/*
    2603 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2604 		 */
   2605 		if (wm_is_using_multiqueue(sc))
   2606 			ifp->if_transmit = wm_transmit;
   2607 	}
   2608 	ifp->if_watchdog = wm_watchdog;
   2609 	ifp->if_init = wm_init;
   2610 	ifp->if_stop = wm_stop;
   2611 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2612 	IFQ_SET_READY(&ifp->if_snd);
   2613 
   2614 	/* Check for jumbo frame */
   2615 	switch (sc->sc_type) {
   2616 	case WM_T_82573:
   2617 		/* XXX limited to 9234 if ASPM is disabled */
   2618 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2619 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2620 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2621 		break;
   2622 	case WM_T_82571:
   2623 	case WM_T_82572:
   2624 	case WM_T_82574:
   2625 	case WM_T_82575:
   2626 	case WM_T_82576:
   2627 	case WM_T_82580:
   2628 	case WM_T_I350:
    2629 	case WM_T_I354: /* XXX ok? */
   2630 	case WM_T_I210:
   2631 	case WM_T_I211:
   2632 	case WM_T_80003:
   2633 	case WM_T_ICH9:
   2634 	case WM_T_ICH10:
   2635 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2636 	case WM_T_PCH_LPT:
   2637 	case WM_T_PCH_SPT:
   2638 		/* XXX limited to 9234 */
   2639 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2640 		break;
   2641 	case WM_T_PCH:
   2642 		/* XXX limited to 4096 */
   2643 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2644 		break;
   2645 	case WM_T_82542_2_0:
   2646 	case WM_T_82542_2_1:
   2647 	case WM_T_82583:
   2648 	case WM_T_ICH8:
   2649 		/* No support for jumbo frame */
   2650 		break;
   2651 	default:
   2652 		/* ETHER_MAX_LEN_JUMBO */
   2653 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2654 		break;
   2655 	}
   2656 
    2657 	/* If we're an i82543 or greater, we can support VLANs. */
   2658 	if (sc->sc_type >= WM_T_82543)
   2659 		sc->sc_ethercom.ec_capabilities |=
   2660 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2661 
   2662 	/*
    2663 	 * We can perform TCPv4 and UDPv4 checksum offload (Tx and Rx),
    2664 	 * but only on i82543 and later.
   2665 	 */
   2666 	if (sc->sc_type >= WM_T_82543) {
   2667 		ifp->if_capabilities |=
   2668 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2669 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2670 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2671 		    IFCAP_CSUM_TCPv6_Tx |
   2672 		    IFCAP_CSUM_UDPv6_Tx;
   2673 	}
   2674 
   2675 	/*
   2676 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2677 	 *
   2678 	 *	82541GI (8086:1076) ... no
   2679 	 *	82572EI (8086:10b9) ... yes
   2680 	 */
   2681 	if (sc->sc_type >= WM_T_82571) {
   2682 		ifp->if_capabilities |=
   2683 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2684 	}
   2685 
   2686 	/*
    2687 	 * If we're an i82544 or greater (except the i82547), we can do
   2688 	 * TCP segmentation offload.
   2689 	 */
   2690 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2691 		ifp->if_capabilities |= IFCAP_TSOv4;
   2692 	}
   2693 
   2694 	if (sc->sc_type >= WM_T_82571) {
   2695 		ifp->if_capabilities |= IFCAP_TSOv6;
   2696 	}
   2697 
   2698 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2699 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2700 
   2701 #ifdef WM_MPSAFE
   2702 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2703 #else
   2704 	sc->sc_core_lock = NULL;
   2705 #endif
   2706 
   2707 	/* Attach the interface. */
   2708 	if_initialize(ifp);
   2709 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2710 	ether_ifattach(ifp, enaddr);
   2711 	if_register(ifp);
   2712 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2713 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2714 			  RND_FLAG_DEFAULT);
   2715 
   2716 #ifdef WM_EVENT_COUNTERS
   2717 	/* Attach event counters. */
   2718 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2719 	    NULL, xname, "linkintr");
   2720 
   2721 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2722 	    NULL, xname, "tx_xoff");
   2723 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2724 	    NULL, xname, "tx_xon");
   2725 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2726 	    NULL, xname, "rx_xoff");
   2727 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2728 	    NULL, xname, "rx_xon");
   2729 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2730 	    NULL, xname, "rx_macctl");
   2731 #endif /* WM_EVENT_COUNTERS */
   2732 
   2733 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2734 		pmf_class_network_register(self, ifp);
   2735 	else
   2736 		aprint_error_dev(self, "couldn't establish power handler\n");
   2737 
   2738 	sc->sc_flags |= WM_F_ATTACHED;
   2739  out:
   2740 	return;
   2741 }
   2742 
   2743 /* The detach function (ca_detach) */
   2744 static int
   2745 wm_detach(device_t self, int flags __unused)
   2746 {
   2747 	struct wm_softc *sc = device_private(self);
   2748 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2749 	int i;
   2750 
   2751 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2752 		return 0;
   2753 
   2754 	/* Stop the interface. Callouts are stopped in it. */
   2755 	wm_stop(ifp, 1);
   2756 
   2757 	pmf_device_deregister(self);
   2758 
   2759 #ifdef WM_EVENT_COUNTERS
   2760 	evcnt_detach(&sc->sc_ev_linkintr);
   2761 
   2762 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2763 	evcnt_detach(&sc->sc_ev_tx_xon);
   2764 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2765 	evcnt_detach(&sc->sc_ev_rx_xon);
   2766 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2767 #endif /* WM_EVENT_COUNTERS */
   2768 
   2769 	/* Tell the firmware about the release */
   2770 	WM_CORE_LOCK(sc);
   2771 	wm_release_manageability(sc);
   2772 	wm_release_hw_control(sc);
   2773 	wm_enable_wakeup(sc);
   2774 	WM_CORE_UNLOCK(sc);
   2775 
   2776 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2777 
   2778 	/* Delete all remaining media. */
   2779 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2780 
   2781 	ether_ifdetach(ifp);
   2782 	if_detach(ifp);
   2783 	if_percpuq_destroy(sc->sc_ipq);
   2784 
   2785 	/* Unload RX dmamaps and free mbufs */
   2786 	for (i = 0; i < sc->sc_nqueues; i++) {
   2787 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2788 		mutex_enter(rxq->rxq_lock);
   2789 		wm_rxdrain(rxq);
   2790 		mutex_exit(rxq->rxq_lock);
   2791 	}
   2792 	/* Must unlock here */
   2793 
   2794 	/* Disestablish the interrupt handler */
   2795 	for (i = 0; i < sc->sc_nintrs; i++) {
   2796 		if (sc->sc_ihs[i] != NULL) {
   2797 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2798 			sc->sc_ihs[i] = NULL;
   2799 		}
   2800 	}
   2801 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2802 
   2803 	wm_free_txrx_queues(sc);
   2804 
   2805 	/* Unmap the registers */
   2806 	if (sc->sc_ss) {
   2807 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2808 		sc->sc_ss = 0;
   2809 	}
   2810 	if (sc->sc_ios) {
   2811 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2812 		sc->sc_ios = 0;
   2813 	}
   2814 	if (sc->sc_flashs) {
   2815 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2816 		sc->sc_flashs = 0;
   2817 	}
   2818 
   2819 	if (sc->sc_core_lock)
   2820 		mutex_obj_free(sc->sc_core_lock);
   2821 	if (sc->sc_ich_phymtx)
   2822 		mutex_obj_free(sc->sc_ich_phymtx);
   2823 	if (sc->sc_ich_nvmmtx)
   2824 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2825 
   2826 	return 0;
   2827 }
   2828 
   2829 static bool
   2830 wm_suspend(device_t self, const pmf_qual_t *qual)
   2831 {
   2832 	struct wm_softc *sc = device_private(self);
   2833 
   2834 	wm_release_manageability(sc);
   2835 	wm_release_hw_control(sc);
   2836 	wm_enable_wakeup(sc);
   2837 
   2838 	return true;
   2839 }
   2840 
   2841 static bool
   2842 wm_resume(device_t self, const pmf_qual_t *qual)
   2843 {
   2844 	struct wm_softc *sc = device_private(self);
   2845 
   2846 	wm_init_manageability(sc);
   2847 
   2848 	return true;
   2849 }
   2850 
   2851 /*
   2852  * wm_watchdog:		[ifnet interface function]
   2853  *
   2854  *	Watchdog timer handler.
   2855  */
   2856 static void
   2857 wm_watchdog(struct ifnet *ifp)
   2858 {
   2859 	int qid;
   2860 	struct wm_softc *sc = ifp->if_softc;
   2861 
   2862 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2863 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2864 
   2865 		wm_watchdog_txq(ifp, txq);
   2866 	}
   2867 
   2868 	/* Reset the interface. */
   2869 	(void) wm_init(ifp);
   2870 
   2871 	/*
   2872 	 * Some upper layer processing, e.g. ALTQ or single-CPU
   2873 	 * systems, still calls ifp->if_start() directly.
   2874 	 */
   2875 	/* Try to get more packets going. */
   2876 	ifp->if_start(ifp);
   2877 }
   2878 
   2879 static void
   2880 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2881 {
   2882 	struct wm_softc *sc = ifp->if_softc;
   2883 
   2884 	/*
   2885 	 * Since we're using delayed interrupts, sweep up
   2886 	 * before we report an error.
   2887 	 */
   2888 	mutex_enter(txq->txq_lock);
   2889 	wm_txeof(sc, txq);
   2890 	mutex_exit(txq->txq_lock);
   2891 
   2892 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2893 #ifdef WM_DEBUG
   2894 		int i, j;
   2895 		struct wm_txsoft *txs;
   2896 #endif
   2897 		log(LOG_ERR,
   2898 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2899 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2900 		    txq->txq_next);
   2901 		ifp->if_oerrors++;
   2902 #ifdef WM_DEBUG
   2903 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   2904 		    i = WM_NEXTTXS(txq, i)) {
   2905 			txs = &txq->txq_soft[i];
   2906 			printf("txs %d tx %d -> %d\n",
   2907 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   2908 			for (j = txs->txs_firstdesc; ;
   2909 			    j = WM_NEXTTX(txq, j)) {
   2910 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2911 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2912 				printf("\t %#08x%08x\n",
   2913 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2914 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2915 				if (j == txs->txs_lastdesc)
   2916 					break;
   2917 			}
   2918 		}
   2919 #endif
   2920 	}
   2921 }
   2922 
   2923 /*
   2924  * wm_tick:
   2925  *
   2926  *	One second timer, used to check link status, sweep up
   2927  *	completed transmit jobs, etc.
   2928  */
   2929 static void
   2930 wm_tick(void *arg)
   2931 {
   2932 	struct wm_softc *sc = arg;
   2933 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2934 #ifndef WM_MPSAFE
   2935 	int s = splnet();
   2936 #endif
   2937 
   2938 	WM_CORE_LOCK(sc);
   2939 
   2940 	if (sc->sc_core_stopping)
   2941 		goto out;
   2942 
   2943 	if (sc->sc_type >= WM_T_82542_2_1) {
   2944 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2945 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2946 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2947 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2948 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2949 	}
   2950 
   2951 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2952 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2953 	    + CSR_READ(sc, WMREG_CRCERRS)
   2954 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2955 	    + CSR_READ(sc, WMREG_SYMERRC)
   2956 	    + CSR_READ(sc, WMREG_RXERRC)
   2957 	    + CSR_READ(sc, WMREG_SEC)
   2958 	    + CSR_READ(sc, WMREG_CEXTERR)
   2959 	    + CSR_READ(sc, WMREG_RLEC);
   2960 	/*
   2961 	 * WMREG_RNBC is incremented when there are no available buffers
   2962 	 * in host memory. It is not a count of dropped packets, because
   2963 	 * the ethernet controller can still receive packets in that case
   2964 	 * if there is space in the PHY's FIFO.
   2965 	 *
   2966 	 * If you want to track WMREG_RNBC, use your own EVCNT
   2967 	 * instead of if_iqdrops.
   2968 	 */
   2969 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
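        	/*
        	 * A minimal sketch of such a dedicated counter, assuming a
        	 * hypothetical sc_ev_rnbc member attached in wm_attach() like
        	 * the other event counters:
        	 *
        	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
        	 *	    NULL, xname, "rnbc");
        	 *
        	 * and then updated here once per tick:
        	 *
        	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
        	 */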
   2970 
   2971 	if (sc->sc_flags & WM_F_HAS_MII)
   2972 		mii_tick(&sc->sc_mii);
   2973 	else if ((sc->sc_type >= WM_T_82575)
   2974 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2975 		wm_serdes_tick(sc);
   2976 	else
   2977 		wm_tbi_tick(sc);
   2978 
   2979 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2980 out:
   2981 	WM_CORE_UNLOCK(sc);
   2982 #ifndef WM_MPSAFE
   2983 	splx(s);
   2984 #endif
   2985 }
   2986 
   2987 static int
   2988 wm_ifflags_cb(struct ethercom *ec)
   2989 {
   2990 	struct ifnet *ifp = &ec->ec_if;
   2991 	struct wm_softc *sc = ifp->if_softc;
   2992 	int rc = 0;
   2993 
   2994 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2995 		device_xname(sc->sc_dev), __func__));
   2996 
   2997 	WM_CORE_LOCK(sc);
   2998 
   2999 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3000 	sc->sc_if_flags = ifp->if_flags;
   3001 
   3002 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3003 		rc = ENETRESET;
   3004 		goto out;
   3005 	}
   3006 
   3007 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3008 		wm_set_filter(sc);
   3009 
   3010 	wm_set_vlan(sc);
   3011 
   3012 out:
   3013 	WM_CORE_UNLOCK(sc);
   3014 
   3015 	return rc;
   3016 }
   3017 
   3018 /*
   3019  * wm_ioctl:		[ifnet interface function]
   3020  *
   3021  *	Handle control requests from the operator.
   3022  */
   3023 static int
   3024 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3025 {
   3026 	struct wm_softc *sc = ifp->if_softc;
   3027 	struct ifreq *ifr = (struct ifreq *) data;
   3028 	struct ifaddr *ifa = (struct ifaddr *)data;
   3029 	struct sockaddr_dl *sdl;
   3030 	int s, error;
   3031 
   3032 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3033 		device_xname(sc->sc_dev), __func__));
   3034 
   3035 #ifndef WM_MPSAFE
   3036 	s = splnet();
   3037 #endif
   3038 	switch (cmd) {
   3039 	case SIOCSIFMEDIA:
   3040 	case SIOCGIFMEDIA:
   3041 		WM_CORE_LOCK(sc);
   3042 		/* Flow control requires full-duplex mode. */
   3043 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3044 		    (ifr->ifr_media & IFM_FDX) == 0)
   3045 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3046 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3047 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3048 				/* We can do both TXPAUSE and RXPAUSE. */
   3049 				ifr->ifr_media |=
   3050 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3051 			}
   3052 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3053 		}
   3054 		WM_CORE_UNLOCK(sc);
   3055 #ifdef WM_MPSAFE
   3056 		s = splnet();
   3057 #endif
   3058 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3059 #ifdef WM_MPSAFE
   3060 		splx(s);
   3061 #endif
   3062 		break;
   3063 	case SIOCINITIFADDR:
   3064 		WM_CORE_LOCK(sc);
   3065 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3066 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3067 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3068 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3069 			/* unicast address is first multicast entry */
   3070 			wm_set_filter(sc);
   3071 			error = 0;
   3072 			WM_CORE_UNLOCK(sc);
   3073 			break;
   3074 		}
   3075 		WM_CORE_UNLOCK(sc);
   3076 		/*FALLTHROUGH*/
   3077 	default:
   3078 #ifdef WM_MPSAFE
   3079 		s = splnet();
   3080 #endif
   3081 		/* It may call wm_start, so unlock here */
   3082 		error = ether_ioctl(ifp, cmd, data);
   3083 #ifdef WM_MPSAFE
   3084 		splx(s);
   3085 #endif
   3086 		if (error != ENETRESET)
   3087 			break;
   3088 
   3089 		error = 0;
   3090 
   3091 		if (cmd == SIOCSIFCAP) {
   3092 			error = (*ifp->if_init)(ifp);
   3093 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3094 			;
   3095 		else if (ifp->if_flags & IFF_RUNNING) {
   3096 			/*
   3097 			 * Multicast list has changed; set the hardware filter
   3098 			 * accordingly.
   3099 			 */
   3100 			WM_CORE_LOCK(sc);
   3101 			wm_set_filter(sc);
   3102 			WM_CORE_UNLOCK(sc);
   3103 		}
   3104 		break;
   3105 	}
   3106 
   3107 #ifndef WM_MPSAFE
   3108 	splx(s);
   3109 #endif
   3110 	return error;
   3111 }
   3112 
   3113 /* MAC address related */
   3114 
   3115 /*
   3116  * Get the offset of the MAC address and return it.
   3117  * If an error occurs, use offset 0.
   3118  */
   3119 static uint16_t
   3120 wm_check_alt_mac_addr(struct wm_softc *sc)
   3121 {
   3122 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3123 	uint16_t offset = NVM_OFF_MACADDR;
   3124 
   3125 	/* Try to read alternative MAC address pointer */
   3126 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3127 		return 0;
   3128 
   3129 	/* Check whether the pointer is valid. */
   3130 	if ((offset == 0x0000) || (offset == 0xffff))
   3131 		return 0;
   3132 
   3133 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3134 	/*
   3135 	 * Check whether the alternative MAC address is valid.
   3136 	 * Some cards have a non-0xffff pointer but don't actually
   3137 	 * use an alternative MAC address.
   3138 	 *
   3139 	 * A valid unicast address has the multicast (I/G) bit clear.
   3140 	 */
   3141 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3142 		if (((myea[0] & 0xff) & 0x01) == 0)
   3143 			return offset; /* Found */
   3144 
   3145 	/* Not found */
   3146 	return 0;
   3147 }
   3148 
   3149 static int
   3150 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3151 {
   3152 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3153 	uint16_t offset = NVM_OFF_MACADDR;
   3154 	int do_invert = 0;
   3155 
   3156 	switch (sc->sc_type) {
   3157 	case WM_T_82580:
   3158 	case WM_T_I350:
   3159 	case WM_T_I354:
   3160 		/* EEPROM Top Level Partitioning */
   3161 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3162 		break;
   3163 	case WM_T_82571:
   3164 	case WM_T_82575:
   3165 	case WM_T_82576:
   3166 	case WM_T_80003:
   3167 	case WM_T_I210:
   3168 	case WM_T_I211:
   3169 		offset = wm_check_alt_mac_addr(sc);
   3170 		if (offset == 0)
   3171 			if ((sc->sc_funcid & 0x01) == 1)
   3172 				do_invert = 1;
   3173 		break;
   3174 	default:
   3175 		if ((sc->sc_funcid & 0x01) == 1)
   3176 			do_invert = 1;
   3177 		break;
   3178 	}
   3179 
   3180 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3181 		goto bad;
   3182 
   3183 	enaddr[0] = myea[0] & 0xff;
   3184 	enaddr[1] = myea[0] >> 8;
   3185 	enaddr[2] = myea[1] & 0xff;
   3186 	enaddr[3] = myea[1] >> 8;
   3187 	enaddr[4] = myea[2] & 0xff;
   3188 	enaddr[5] = myea[2] >> 8;
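        	/*
        	 * Each 16-bit NVM word holds two address octets, low byte
        	 * first.  For example, myea[] = { 0x1100, 0x3322, 0x5544 }
        	 * unpacks to the MAC address 00:11:22:33:44:55.
        	 */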
   3189 
   3190 	/*
   3191 	 * Toggle the LSB of the MAC address on the second port
   3192 	 * of some dual port cards.
   3193 	 */
   3194 	if (do_invert != 0)
   3195 		enaddr[5] ^= 1;
   3196 
   3197 	return 0;
   3198 
   3199  bad:
   3200 	return -1;
   3201 }
   3202 
   3203 /*
   3204  * wm_set_ral:
   3205  *
   3206  *	Set an entry in the receive address list.
   3207  */
   3208 static void
   3209 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3210 {
   3211 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3212 	uint32_t wlock_mac;
   3213 	int rv;
   3214 
   3215 	if (enaddr != NULL) {
   3216 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3217 		    (enaddr[3] << 24);
   3218 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3219 		ral_hi |= RAL_AV;
   3220 	} else {
   3221 		ral_lo = 0;
   3222 		ral_hi = 0;
   3223 	}
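        	/*
        	 * The pair is the little-endian register image of the
        	 * address; e.g. 00:11:22:33:44:55 yields ral_lo = 0x33221100
        	 * and ral_hi = 0x5544 | RAL_AV (the Address Valid bit).
        	 */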
   3224 
   3225 	switch (sc->sc_type) {
   3226 	case WM_T_82542_2_0:
   3227 	case WM_T_82542_2_1:
   3228 	case WM_T_82543:
   3229 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3230 		CSR_WRITE_FLUSH(sc);
   3231 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3232 		CSR_WRITE_FLUSH(sc);
   3233 		break;
   3234 	case WM_T_PCH2:
   3235 	case WM_T_PCH_LPT:
   3236 	case WM_T_PCH_SPT:
   3237 		if (idx == 0) {
   3238 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3239 			CSR_WRITE_FLUSH(sc);
   3240 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3241 			CSR_WRITE_FLUSH(sc);
   3242 			return;
   3243 		}
   3244 		if (sc->sc_type != WM_T_PCH2) {
   3245 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3246 			    FWSM_WLOCK_MAC);
   3247 			addrl = WMREG_SHRAL(idx - 1);
   3248 			addrh = WMREG_SHRAH(idx - 1);
   3249 		} else {
   3250 			wlock_mac = 0;
   3251 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3252 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3253 		}
   3254 
   3255 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3256 			rv = wm_get_swflag_ich8lan(sc);
   3257 			if (rv != 0)
   3258 				return;
   3259 			CSR_WRITE(sc, addrl, ral_lo);
   3260 			CSR_WRITE_FLUSH(sc);
   3261 			CSR_WRITE(sc, addrh, ral_hi);
   3262 			CSR_WRITE_FLUSH(sc);
   3263 			wm_put_swflag_ich8lan(sc);
   3264 		}
   3265 
   3266 		break;
   3267 	default:
   3268 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3269 		CSR_WRITE_FLUSH(sc);
   3270 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3271 		CSR_WRITE_FLUSH(sc);
   3272 		break;
   3273 	}
   3274 }
   3275 
   3276 /*
   3277  * wm_mchash:
   3278  *
   3279  *	Compute the hash of the multicast address for the 4096-bit
   3280  *	multicast filter.
   3281  */
   3282 static uint32_t
   3283 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3284 {
   3285 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3286 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3287 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3288 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3289 	uint32_t hash;
   3290 
   3291 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3292 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3293 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3294 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3295 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3296 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3297 		return (hash & 0x3ff);
   3298 	}
   3299 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3300 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3301 
   3302 	return (hash & 0xfff);
   3303 }
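
        /*
         * Worked example for the non-ICH/PCH case with mchash_type 0:
         * an address ending in the bytes 0x12, 0x34 hashes to
         * (0x12 >> 4) | (0x34 << 4) = 0x341, so wm_set_filter() sets
         * bit (0x341 & 0x1f) = 1 in MTA register (0x341 >> 5) = 0x1a.
         */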
   3304 
   3305 /*
   3306  * wm_set_filter:
   3307  *
   3308  *	Set up the receive filter.
   3309  */
   3310 static void
   3311 wm_set_filter(struct wm_softc *sc)
   3312 {
   3313 	struct ethercom *ec = &sc->sc_ethercom;
   3314 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3315 	struct ether_multi *enm;
   3316 	struct ether_multistep step;
   3317 	bus_addr_t mta_reg;
   3318 	uint32_t hash, reg, bit;
   3319 	int i, size, ralmax;
   3320 
   3321 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3322 		device_xname(sc->sc_dev), __func__));
   3323 
   3324 	if (sc->sc_type >= WM_T_82544)
   3325 		mta_reg = WMREG_CORDOVA_MTA;
   3326 	else
   3327 		mta_reg = WMREG_MTA;
   3328 
   3329 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3330 
   3331 	if (ifp->if_flags & IFF_BROADCAST)
   3332 		sc->sc_rctl |= RCTL_BAM;
   3333 	if (ifp->if_flags & IFF_PROMISC) {
   3334 		sc->sc_rctl |= RCTL_UPE;
   3335 		goto allmulti;
   3336 	}
   3337 
   3338 	/*
   3339 	 * Set the station address in the first RAL slot, and
   3340 	 * clear the remaining slots.
   3341 	 */
   3342 	if (sc->sc_type == WM_T_ICH8)
   3343 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3344 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3345 	    || (sc->sc_type == WM_T_PCH))
   3346 		size = WM_RAL_TABSIZE_ICH8;
   3347 	else if (sc->sc_type == WM_T_PCH2)
   3348 		size = WM_RAL_TABSIZE_PCH2;
   3349 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3350 		size = WM_RAL_TABSIZE_PCH_LPT;
   3351 	else if (sc->sc_type == WM_T_82575)
   3352 		size = WM_RAL_TABSIZE_82575;
   3353 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3354 		size = WM_RAL_TABSIZE_82576;
   3355 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3356 		size = WM_RAL_TABSIZE_I350;
   3357 	else
   3358 		size = WM_RAL_TABSIZE;
   3359 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3360 
   3361 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3362 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3363 		switch (i) {
   3364 		case 0:
   3365 			/* We can use all entries */
   3366 			ralmax = size;
   3367 			break;
   3368 		case 1:
   3369 			/* Only RAR[0] */
   3370 			ralmax = 1;
   3371 			break;
   3372 		default:
   3373 			/* available SHRA + RAR[0] */
   3374 			ralmax = i + 1;
   3375 		}
   3376 	} else
   3377 		ralmax = size;
   3378 	for (i = 1; i < size; i++) {
   3379 		if (i < ralmax)
   3380 			wm_set_ral(sc, NULL, i);
   3381 	}
   3382 
   3383 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3384 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3385 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3386 	    || (sc->sc_type == WM_T_PCH_SPT))
   3387 		size = WM_ICH8_MC_TABSIZE;
   3388 	else
   3389 		size = WM_MC_TABSIZE;
   3390 	/* Clear out the multicast table. */
   3391 	for (i = 0; i < size; i++) {
   3392 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3393 		CSR_WRITE_FLUSH(sc);
   3394 	}
   3395 
   3396 	ETHER_LOCK(ec);
   3397 	ETHER_FIRST_MULTI(step, ec, enm);
   3398 	while (enm != NULL) {
   3399 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3400 			ETHER_UNLOCK(ec);
   3401 			/*
   3402 			 * We must listen to a range of multicast addresses.
   3403 			 * For now, just accept all multicasts, rather than
   3404 			 * trying to set only those filter bits needed to match
   3405 			 * the range.  (At this time, the only use of address
   3406 			 * ranges is for IP multicast routing, for which the
   3407 			 * range is big enough to require all bits set.)
   3408 			 */
   3409 			goto allmulti;
   3410 		}
   3411 
   3412 		hash = wm_mchash(sc, enm->enm_addrlo);
   3413 
   3414 		reg = (hash >> 5);
   3415 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3416 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3417 		    || (sc->sc_type == WM_T_PCH2)
   3418 		    || (sc->sc_type == WM_T_PCH_LPT)
   3419 		    || (sc->sc_type == WM_T_PCH_SPT))
   3420 			reg &= 0x1f;
   3421 		else
   3422 			reg &= 0x7f;
   3423 		bit = hash & 0x1f;
   3424 
   3425 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3426 		hash |= 1U << bit;
   3427 
   3428 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3429 			/*
   3430 			 * 82544 Errata 9: Certain register cannot be written
   3431 			 * with particular alignments in PCI-X bus operation
   3432 			 * (FCAH, MTA and VFTA).
   3433 			 */
   3434 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3435 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3436 			CSR_WRITE_FLUSH(sc);
   3437 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3438 			CSR_WRITE_FLUSH(sc);
   3439 		} else {
   3440 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3441 			CSR_WRITE_FLUSH(sc);
   3442 		}
   3443 
   3444 		ETHER_NEXT_MULTI(step, enm);
   3445 	}
   3446 	ETHER_UNLOCK(ec);
   3447 
   3448 	ifp->if_flags &= ~IFF_ALLMULTI;
   3449 	goto setit;
   3450 
   3451  allmulti:
   3452 	ifp->if_flags |= IFF_ALLMULTI;
   3453 	sc->sc_rctl |= RCTL_MPE;
   3454 
   3455  setit:
   3456 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3457 }
   3458 
   3459 /* Reset and init related */
   3460 
   3461 static void
   3462 wm_set_vlan(struct wm_softc *sc)
   3463 {
   3464 
   3465 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3466 		device_xname(sc->sc_dev), __func__));
   3467 
   3468 	/* Deal with VLAN enables. */
   3469 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3470 		sc->sc_ctrl |= CTRL_VME;
   3471 	else
   3472 		sc->sc_ctrl &= ~CTRL_VME;
   3473 
   3474 	/* Write the control registers. */
   3475 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3476 }
   3477 
   3478 static void
   3479 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3480 {
   3481 	uint32_t gcr;
   3482 	pcireg_t ctrl2;
   3483 
   3484 	gcr = CSR_READ(sc, WMREG_GCR);
   3485 
   3486 	/* Only take action if timeout value is defaulted to 0 */
   3487 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3488 		goto out;
   3489 
   3490 	if ((gcr & GCR_CAP_VER2) == 0) {
   3491 		gcr |= GCR_CMPL_TMOUT_10MS;
   3492 		goto out;
   3493 	}
   3494 
   3495 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3496 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3497 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3498 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3499 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3500 
   3501 out:
   3502 	/* Disable completion timeout resend */
   3503 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3504 
   3505 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3506 }
   3507 
   3508 void
   3509 wm_get_auto_rd_done(struct wm_softc *sc)
   3510 {
   3511 	int i;
   3512 
   3513 	/* wait for eeprom to reload */
   3514 	switch (sc->sc_type) {
   3515 	case WM_T_82571:
   3516 	case WM_T_82572:
   3517 	case WM_T_82573:
   3518 	case WM_T_82574:
   3519 	case WM_T_82583:
   3520 	case WM_T_82575:
   3521 	case WM_T_82576:
   3522 	case WM_T_82580:
   3523 	case WM_T_I350:
   3524 	case WM_T_I354:
   3525 	case WM_T_I210:
   3526 	case WM_T_I211:
   3527 	case WM_T_80003:
   3528 	case WM_T_ICH8:
   3529 	case WM_T_ICH9:
   3530 		for (i = 0; i < 10; i++) {
   3531 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3532 				break;
   3533 			delay(1000);
   3534 		}
   3535 		if (i == 10) {
   3536 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3537 			    "complete\n", device_xname(sc->sc_dev));
   3538 		}
   3539 		break;
   3540 	default:
   3541 		break;
   3542 	}
   3543 }
   3544 
   3545 void
   3546 wm_lan_init_done(struct wm_softc *sc)
   3547 {
   3548 	uint32_t reg = 0;
   3549 	int i;
   3550 
   3551 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3552 		device_xname(sc->sc_dev), __func__));
   3553 
   3554 	/* Wait for eeprom to reload */
   3555 	switch (sc->sc_type) {
   3556 	case WM_T_ICH10:
   3557 	case WM_T_PCH:
   3558 	case WM_T_PCH2:
   3559 	case WM_T_PCH_LPT:
   3560 	case WM_T_PCH_SPT:
   3561 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3562 			reg = CSR_READ(sc, WMREG_STATUS);
   3563 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3564 				break;
   3565 			delay(100);
   3566 		}
   3567 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3568 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3569 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3570 		}
   3571 		break;
   3572 	default:
   3573 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3574 		    __func__);
   3575 		break;
   3576 	}
   3577 
   3578 	reg &= ~STATUS_LAN_INIT_DONE;
   3579 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3580 }
   3581 
   3582 void
   3583 wm_get_cfg_done(struct wm_softc *sc)
   3584 {
   3585 	int mask;
   3586 	uint32_t reg;
   3587 	int i;
   3588 
   3589 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3590 		device_xname(sc->sc_dev), __func__));
   3591 
   3592 	/* Wait for eeprom to reload */
   3593 	switch (sc->sc_type) {
   3594 	case WM_T_82542_2_0:
   3595 	case WM_T_82542_2_1:
   3596 		/* null */
   3597 		break;
   3598 	case WM_T_82543:
   3599 	case WM_T_82544:
   3600 	case WM_T_82540:
   3601 	case WM_T_82545:
   3602 	case WM_T_82545_3:
   3603 	case WM_T_82546:
   3604 	case WM_T_82546_3:
   3605 	case WM_T_82541:
   3606 	case WM_T_82541_2:
   3607 	case WM_T_82547:
   3608 	case WM_T_82547_2:
   3609 	case WM_T_82573:
   3610 	case WM_T_82574:
   3611 	case WM_T_82583:
   3612 		/* generic */
   3613 		delay(10*1000);
   3614 		break;
   3615 	case WM_T_80003:
   3616 	case WM_T_82571:
   3617 	case WM_T_82572:
   3618 	case WM_T_82575:
   3619 	case WM_T_82576:
   3620 	case WM_T_82580:
   3621 	case WM_T_I350:
   3622 	case WM_T_I354:
   3623 	case WM_T_I210:
   3624 	case WM_T_I211:
   3625 		if (sc->sc_type == WM_T_82571) {
   3626 			/* Only 82571 shares port 0 */
   3627 			mask = EEMNGCTL_CFGDONE_0;
   3628 		} else
   3629 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3630 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3631 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3632 				break;
   3633 			delay(1000);
   3634 		}
   3635 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3636 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3637 				device_xname(sc->sc_dev), __func__));
   3638 		}
   3639 		break;
   3640 	case WM_T_ICH8:
   3641 	case WM_T_ICH9:
   3642 	case WM_T_ICH10:
   3643 	case WM_T_PCH:
   3644 	case WM_T_PCH2:
   3645 	case WM_T_PCH_LPT:
   3646 	case WM_T_PCH_SPT:
   3647 		delay(10*1000);
   3648 		if (sc->sc_type >= WM_T_ICH10)
   3649 			wm_lan_init_done(sc);
   3650 		else
   3651 			wm_get_auto_rd_done(sc);
   3652 
   3653 		reg = CSR_READ(sc, WMREG_STATUS);
   3654 		if ((reg & STATUS_PHYRA) != 0)
   3655 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3656 		break;
   3657 	default:
   3658 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3659 		    __func__);
   3660 		break;
   3661 	}
   3662 }
   3663 
   3664 void
   3665 wm_phy_post_reset(struct wm_softc *sc)
   3666 {
   3667 	uint32_t reg;
   3668 
   3669 	/* This function is only for ICH8 and newer. */
   3670 	if (sc->sc_type < WM_T_ICH8)
   3671 		return;
   3672 
   3673 	if (wm_phy_resetisblocked(sc)) {
   3674 		/* XXX */
   3675 		device_printf(sc->sc_dev, " PHY is blocked\n");
   3676 		return;
   3677 	}
   3678 
   3679 	/* Allow time for h/w to get to quiescent state after reset */
   3680 	delay(10*1000);
   3681 
   3682 	/* Perform any necessary post-reset workarounds */
   3683 	if (sc->sc_type == WM_T_PCH)
   3684 		wm_hv_phy_workaround_ich8lan(sc);
   3685 	if (sc->sc_type == WM_T_PCH2)
   3686 		wm_lv_phy_workaround_ich8lan(sc);
   3687 
   3688 	/* Clear the host wakeup bit after lcd reset */
   3689 	if (sc->sc_type >= WM_T_PCH) {
   3690 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3691 		    BM_PORT_GEN_CFG);
   3692 		reg &= ~BM_WUC_HOST_WU_BIT;
   3693 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3694 		    BM_PORT_GEN_CFG, reg);
   3695 	}
   3696 
   3697 	/*
   3698 	 * XXX Configure the LCD with the extended configuration region
   3699 	 * in NVM
   3700 	 */
   3701 
   3702 	/* Configure the LCD with the OEM bits in NVM */
   3703 }
   3704 
   3705 /* Init hardware bits */
   3706 void
   3707 wm_initialize_hardware_bits(struct wm_softc *sc)
   3708 {
   3709 	uint32_t tarc0, tarc1, reg;
   3710 
   3711 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3712 		device_xname(sc->sc_dev), __func__));
   3713 
   3714 	/* For 82571 variant, 80003 and ICHs */
   3715 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3716 	    || (sc->sc_type >= WM_T_80003)) {
   3717 
   3718 		/* Transmit Descriptor Control 0 */
   3719 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3720 		reg |= TXDCTL_COUNT_DESC;
   3721 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3722 
   3723 		/* Transmit Descriptor Control 1 */
   3724 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3725 		reg |= TXDCTL_COUNT_DESC;
   3726 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3727 
   3728 		/* TARC0 */
   3729 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3730 		switch (sc->sc_type) {
   3731 		case WM_T_82571:
   3732 		case WM_T_82572:
   3733 		case WM_T_82573:
   3734 		case WM_T_82574:
   3735 		case WM_T_82583:
   3736 		case WM_T_80003:
   3737 			/* Clear bits 30..27 */
   3738 			tarc0 &= ~__BITS(30, 27);
   3739 			break;
   3740 		default:
   3741 			break;
   3742 		}
   3743 
   3744 		switch (sc->sc_type) {
   3745 		case WM_T_82571:
   3746 		case WM_T_82572:
   3747 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3748 
   3749 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3750 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3751 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3752 			/* 8257[12] Errata No.7 */
   3753 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3754 
   3755 			/* TARC1 bit 28 */
   3756 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3757 				tarc1 &= ~__BIT(28);
   3758 			else
   3759 				tarc1 |= __BIT(28);
   3760 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3761 
   3762 			/*
   3763 			 * 8257[12] Errata No.13
   3764 			 * Disable Dynamic Clock Gating.
   3765 			 */
   3766 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3767 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3768 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3769 			break;
   3770 		case WM_T_82573:
   3771 		case WM_T_82574:
   3772 		case WM_T_82583:
   3773 			if ((sc->sc_type == WM_T_82574)
   3774 			    || (sc->sc_type == WM_T_82583))
   3775 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3776 
   3777 			/* Extended Device Control */
   3778 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3779 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3780 			reg |= __BIT(22);	/* Set bit 22 */
   3781 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3782 
   3783 			/* Device Control */
   3784 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3785 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3786 
   3787 			/* PCIe Control Register */
   3788 			/*
   3789 			 * 82573 Errata (unknown).
   3790 			 *
   3791 			 * 82574 Errata 25 and 82583 Errata 12
   3792 			 * "Dropped Rx Packets":
   3793 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3794 			 */
   3795 			reg = CSR_READ(sc, WMREG_GCR);
   3796 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3797 			CSR_WRITE(sc, WMREG_GCR, reg);
   3798 
   3799 			if ((sc->sc_type == WM_T_82574)
   3800 			    || (sc->sc_type == WM_T_82583)) {
   3801 				/*
   3802 				 * Document says this bit must be set for
   3803 				 * proper operation.
   3804 				 */
   3805 				reg = CSR_READ(sc, WMREG_GCR);
   3806 				reg |= __BIT(22);
   3807 				CSR_WRITE(sc, WMREG_GCR, reg);
   3808 
   3809 				/*
   3810 				 * Apply a workaround for the hardware
   3811 				 * erratum documented in the errata docs.
   3812 				 * It fixes an issue where error-prone or
   3813 				 * unreliable PCIe completions occur,
   3814 				 * particularly with ASPM enabled. Without
   3815 				 * the fix, the issue can cause Tx timeouts.
   3816 				 */
   3817 				reg = CSR_READ(sc, WMREG_GCR2);
   3818 				reg |= __BIT(0);
   3819 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3820 			}
   3821 			break;
   3822 		case WM_T_80003:
   3823 			/* TARC0 */
   3824 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3825 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3826 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3827 
   3828 			/* TARC1 bit 28 */
   3829 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3830 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3831 				tarc1 &= ~__BIT(28);
   3832 			else
   3833 				tarc1 |= __BIT(28);
   3834 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3835 			break;
   3836 		case WM_T_ICH8:
   3837 		case WM_T_ICH9:
   3838 		case WM_T_ICH10:
   3839 		case WM_T_PCH:
   3840 		case WM_T_PCH2:
   3841 		case WM_T_PCH_LPT:
   3842 		case WM_T_PCH_SPT:
   3843 			/* TARC0 */
   3844 			if ((sc->sc_type == WM_T_ICH8)
   3845 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3846 				/* Set TARC0 bits 29 and 28 */
   3847 				tarc0 |= __BITS(29, 28);
   3848 			}
   3849 			/* Set TARC0 bits 23,24,26,27 */
   3850 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3851 
   3852 			/* CTRL_EXT */
   3853 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3854 			reg |= __BIT(22);	/* Set bit 22 */
   3855 			/*
   3856 			 * Enable PHY low-power state when MAC is at D3
   3857 			 * w/o WoL
   3858 			 */
   3859 			if (sc->sc_type >= WM_T_PCH)
   3860 				reg |= CTRL_EXT_PHYPDEN;
   3861 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3862 
   3863 			/* TARC1 */
   3864 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3865 			/* bit 28 */
   3866 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3867 				tarc1 &= ~__BIT(28);
   3868 			else
   3869 				tarc1 |= __BIT(28);
   3870 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3871 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3872 
   3873 			/* Device Status */
   3874 			if (sc->sc_type == WM_T_ICH8) {
   3875 				reg = CSR_READ(sc, WMREG_STATUS);
   3876 				reg &= ~__BIT(31);
   3877 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3878 
   3879 			}
   3880 
   3881 			/* IOSFPC */
   3882 			if (sc->sc_type == WM_T_PCH_SPT) {
   3883 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3884 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3885 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3886 			}
   3887 			/*
   3888 			 * To work around a descriptor data corruption
   3889 			 * issue seen with NFS v2 UDP traffic, just disable
   3890 			 * the NFS filtering capability.
   3891 			 */
   3892 			reg = CSR_READ(sc, WMREG_RFCTL);
   3893 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3894 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3895 			break;
   3896 		default:
   3897 			break;
   3898 		}
   3899 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3900 
   3901 		switch (sc->sc_type) {
   3902 		/*
   3903 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3904 		 * Avoid RSS Hash Value bug.
   3905 		 */
   3906 		case WM_T_82571:
   3907 		case WM_T_82572:
   3908 		case WM_T_82573:
   3909 		case WM_T_80003:
   3910 		case WM_T_ICH8:
   3911 			reg = CSR_READ(sc, WMREG_RFCTL);
   3912 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3913 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3914 			break;
   3915 		case WM_T_82574:
   3916 			/* Use extended Rx descriptors. */
   3917 			reg = CSR_READ(sc, WMREG_RFCTL);
   3918 			reg |= WMREG_RFCTL_EXSTEN;
   3919 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3920 			break;
   3921 		default:
   3922 			break;
   3923 		}
   3924 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3925 		/*
   3926 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3927 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3928 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3929 		 * Correctly by the Device"
   3930 		 *
   3931 		 * I354(C2000) Errata AVR53:
   3932 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3933 		 * Hang"
   3934 		 */
   3935 		reg = CSR_READ(sc, WMREG_RFCTL);
   3936 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3937 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3938 	}
   3939 }
   3940 
   3941 static uint32_t
   3942 wm_rxpbs_adjust_82580(uint32_t val)
   3943 {
   3944 	uint32_t rv = 0;
   3945 
   3946 	if (val < __arraycount(wm_82580_rxpbs_table))
   3947 		rv = wm_82580_rxpbs_table[val];
   3948 
   3949 	return rv;
   3950 }
   3951 
   3952 /*
   3953  * wm_reset_phy:
   3954  *
   3955  *	generic PHY reset function.
   3956  *	Same as e1000_phy_hw_reset_generic()
   3957  */
   3958 static void
   3959 wm_reset_phy(struct wm_softc *sc)
   3960 {
   3961 	uint32_t reg;
   3962 
   3963 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3964 		device_xname(sc->sc_dev), __func__));
   3965 	if (wm_phy_resetisblocked(sc))
   3966 		return;
   3967 
   3968 	sc->phy.acquire(sc);
   3969 
   3970 	reg = CSR_READ(sc, WMREG_CTRL);
   3971 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3972 	CSR_WRITE_FLUSH(sc);
   3973 
   3974 	delay(sc->phy.reset_delay_us);
   3975 
   3976 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3977 	CSR_WRITE_FLUSH(sc);
   3978 
   3979 	delay(150);
   3980 
   3981 	sc->phy.release(sc);
   3982 
   3983 	wm_get_cfg_done(sc);
   3984 	wm_phy_post_reset(sc);
   3985 }
   3986 
   3987 static void
   3988 wm_flush_desc_rings(struct wm_softc *sc)
   3989 {
   3990 	pcireg_t preg;
   3991 	uint32_t reg;
   3992 	int nexttx;
   3993 
   3994 	/* First, disable MULR fix in FEXTNVM11 */
   3995 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3996 	reg |= FEXTNVM11_DIS_MULRFIX;
   3997 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3998 
   3999 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4000 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4001 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   4002 		struct wm_txqueue *txq;
   4003 		wiseman_txdesc_t *txd;
   4004 
   4005 		/* TX */
   4006 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4007 		    device_xname(sc->sc_dev), preg, reg);
   4008 		reg = CSR_READ(sc, WMREG_TCTL);
   4009 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4010 
   4011 		txq = &sc->sc_queue[0].wmq_txq;
   4012 		nexttx = txq->txq_next;
   4013 		txd = &txq->txq_descs[nexttx];
   4014 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4015 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4016 		txd->wtx_fields.wtxu_status = 0;
   4017 		txd->wtx_fields.wtxu_options = 0;
   4018 		txd->wtx_fields.wtxu_vlan = 0;
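        		/*
        		 * The descriptor queued above is a dummy: a single
        		 * 512-byte packet with only IFCS set, sent purely so
        		 * that the hardware drains the pending Tx ring before
        		 * the reset.
        		 */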
   4019 
   4020 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4021 			BUS_SPACE_BARRIER_WRITE);
   4022 
   4023 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4024 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4025 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4026 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4027 		delay(250);
   4028 	}
   4029 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4030 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   4031 		uint32_t rctl;
   4032 
   4033 		/* RX */
   4034 		printf("%s: Need RX flush (reg = %08x)\n",
   4035 		    device_xname(sc->sc_dev), preg);
   4036 		rctl = CSR_READ(sc, WMREG_RCTL);
   4037 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4038 		CSR_WRITE_FLUSH(sc);
   4039 		delay(150);
   4040 
   4041 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4042 		/* zero the lower 14 bits (prefetch and host thresholds) */
   4043 		reg &= 0xffffc000;
   4044 		/*
   4045 		 * update thresholds: prefetch threshold to 31, host threshold
   4046 		 * to 1 and make sure the granularity is "descriptors" and not
   4047 		 * "cache lines"
   4048 		 */
   4049 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4050 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
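        		/*
        		 * Sketch of the encoding, assuming the usual RXDCTL
        		 * layout (PTHRESH in bits 5:0, HTHRESH in bits 13:8):
        		 * 0x1f sets the prefetch threshold to 31, (1 << 8)
        		 * sets the host threshold to 1, and RXDCTL_GRAN
        		 * selects descriptor granularity.
        		 */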
   4051 
   4052 		/*
   4053 		 * momentarily enable the RX ring for the changes to take
   4054 		 * effect
   4055 		 */
   4056 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4057 		CSR_WRITE_FLUSH(sc);
   4058 		delay(150);
   4059 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4060 	}
   4061 }
   4062 
   4063 /*
   4064  * wm_reset:
   4065  *
   4066  *	Reset the i82542 chip.
   4067  */
   4068 static void
   4069 wm_reset(struct wm_softc *sc)
   4070 {
   4071 	int phy_reset = 0;
   4072 	int i, error = 0;
   4073 	uint32_t reg;
   4074 
   4075 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4076 		device_xname(sc->sc_dev), __func__));
   4077 	KASSERT(sc->sc_type != 0);
   4078 
   4079 	/*
   4080 	 * Allocate on-chip memory according to the MTU size.
   4081 	 * The Packet Buffer Allocation register must be written
   4082 	 * before the chip is reset.
   4083 	 */
   4084 	switch (sc->sc_type) {
   4085 	case WM_T_82547:
   4086 	case WM_T_82547_2:
   4087 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4088 		    PBA_22K : PBA_30K;
   4089 		for (i = 0; i < sc->sc_nqueues; i++) {
   4090 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4091 			txq->txq_fifo_head = 0;
   4092 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4093 			txq->txq_fifo_size =
   4094 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4095 			txq->txq_fifo_stall = 0;
   4096 		}
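        		/*
        		 * Example: with a standard MTU, sc_pba is PBA_30K, so
        		 * the Rx packet buffer gets 30 KB and the remaining
        		 * PBA_40K - PBA_30K = 10 KB becomes the Tx FIFO used by
        		 * the 82547 Tx stall workaround.
        		 */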
   4097 		break;
   4098 	case WM_T_82571:
   4099 	case WM_T_82572:
   4100 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4101 	case WM_T_80003:
   4102 		sc->sc_pba = PBA_32K;
   4103 		break;
   4104 	case WM_T_82573:
   4105 		sc->sc_pba = PBA_12K;
   4106 		break;
   4107 	case WM_T_82574:
   4108 	case WM_T_82583:
   4109 		sc->sc_pba = PBA_20K;
   4110 		break;
   4111 	case WM_T_82576:
   4112 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4113 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4114 		break;
   4115 	case WM_T_82580:
   4116 	case WM_T_I350:
   4117 	case WM_T_I354:
   4118 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4119 		break;
   4120 	case WM_T_I210:
   4121 	case WM_T_I211:
   4122 		sc->sc_pba = PBA_34K;
   4123 		break;
   4124 	case WM_T_ICH8:
   4125 		/* Workaround for a bit corruption issue in FIFO memory */
   4126 		sc->sc_pba = PBA_8K;
   4127 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4128 		break;
   4129 	case WM_T_ICH9:
   4130 	case WM_T_ICH10:
   4131 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4132 		    PBA_14K : PBA_10K;
   4133 		break;
   4134 	case WM_T_PCH:
   4135 	case WM_T_PCH2:
   4136 	case WM_T_PCH_LPT:
   4137 	case WM_T_PCH_SPT:
   4138 		sc->sc_pba = PBA_26K;
   4139 		break;
   4140 	default:
   4141 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4142 		    PBA_40K : PBA_48K;
   4143 		break;
   4144 	}
   4145 	/*
   4146 	 * Only old or non-multiqueue devices have the PBA register
   4147 	 * XXX Need special handling for 82575.
   4148 	 */
   4149 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4150 	    || (sc->sc_type == WM_T_82575))
   4151 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4152 
   4153 	/* Prevent the PCI-E bus from sticking */
   4154 	if (sc->sc_flags & WM_F_PCIE) {
   4155 		int timeout = 800;
   4156 
   4157 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4158 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4159 
   4160 		while (timeout--) {
   4161 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4162 			    == 0)
   4163 				break;
   4164 			delay(100);
   4165 		}
   4166 		if (timeout == 0)
   4167 			device_printf(sc->sc_dev,
   4168 			    "failed to disable busmastering\n");
   4169 	}
   4170 
   4171 	/* Set the completion timeout for interface */
   4172 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4173 	    || (sc->sc_type == WM_T_82580)
   4174 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4175 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4176 		wm_set_pcie_completion_timeout(sc);
   4177 
   4178 	/* Clear interrupt */
   4179 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4180 	if (wm_is_using_msix(sc)) {
   4181 		if (sc->sc_type != WM_T_82574) {
   4182 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4183 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4184 		} else {
   4185 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4186 		}
   4187 	}
   4188 
   4189 	/* Stop the transmit and receive processes. */
   4190 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4191 	sc->sc_rctl &= ~RCTL_EN;
   4192 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4193 	CSR_WRITE_FLUSH(sc);
   4194 
   4195 	/* XXX set_tbi_sbp_82543() */
   4196 
   4197 	delay(10*1000);
   4198 
   4199 	/* Must acquire the MDIO ownership before MAC reset */
   4200 	switch (sc->sc_type) {
   4201 	case WM_T_82573:
   4202 	case WM_T_82574:
   4203 	case WM_T_82583:
   4204 		error = wm_get_hw_semaphore_82573(sc);
   4205 		break;
   4206 	default:
   4207 		break;
   4208 	}
   4209 
   4210 	/*
   4211 	 * 82541 Errata 29? & 82547 Errata 28?
   4212 	 * See also the description about PHY_RST bit in CTRL register
   4213 	 * in 8254x_GBe_SDM.pdf.
   4214 	 */
   4215 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4216 		CSR_WRITE(sc, WMREG_CTRL,
   4217 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4218 		CSR_WRITE_FLUSH(sc);
   4219 		delay(5000);
   4220 	}
   4221 
   4222 	switch (sc->sc_type) {
   4223 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4224 	case WM_T_82541:
   4225 	case WM_T_82541_2:
   4226 	case WM_T_82547:
   4227 	case WM_T_82547_2:
   4228 		/*
   4229 		 * On some chipsets, a reset through a memory-mapped write
   4230 		 * cycle can cause the chip to reset before completing the
   4231 		 * write cycle.  This causes a major headache that can be
   4232 		 * avoided by issuing the reset via indirect register writes
   4233 		 * through I/O space.
   4234 		 *
   4235 		 * So, if we successfully mapped the I/O BAR at attach time,
   4236 		 * use that.  Otherwise, try our luck with a memory-mapped
   4237 		 * reset.
   4238 		 */
   4239 		if (sc->sc_flags & WM_F_IOH_VALID)
   4240 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4241 		else
   4242 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4243 		break;
   4244 	case WM_T_82545_3:
   4245 	case WM_T_82546_3:
   4246 		/* Use the shadow control register on these chips. */
   4247 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4248 		break;
   4249 	case WM_T_80003:
   4250 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4251 		sc->phy.acquire(sc);
   4252 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4253 		sc->phy.release(sc);
   4254 		break;
   4255 	case WM_T_ICH8:
   4256 	case WM_T_ICH9:
   4257 	case WM_T_ICH10:
   4258 	case WM_T_PCH:
   4259 	case WM_T_PCH2:
   4260 	case WM_T_PCH_LPT:
   4261 	case WM_T_PCH_SPT:
   4262 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4263 		if (wm_phy_resetisblocked(sc) == false) {
   4264 			/*
   4265 			 * Gate automatic PHY configuration by hardware on
   4266 			 * non-managed 82579
   4267 			 */
   4268 			if ((sc->sc_type == WM_T_PCH2)
   4269 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4270 				== 0))
   4271 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4272 
   4273 			reg |= CTRL_PHY_RESET;
   4274 			phy_reset = 1;
   4275 		} else
   4276 			printf("XXX reset is blocked!!!\n");
   4277 		sc->phy.acquire(sc);
   4278 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4279 		/* Don't insert a completion barrier during reset */
   4280 		delay(20*1000);
   4281 		mutex_exit(sc->sc_ich_phymtx);
   4282 		break;
   4283 	case WM_T_82580:
   4284 	case WM_T_I350:
   4285 	case WM_T_I354:
   4286 	case WM_T_I210:
   4287 	case WM_T_I211:
   4288 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4289 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4290 			CSR_WRITE_FLUSH(sc);
   4291 		delay(5000);
   4292 		break;
   4293 	case WM_T_82542_2_0:
   4294 	case WM_T_82542_2_1:
   4295 	case WM_T_82543:
   4296 	case WM_T_82540:
   4297 	case WM_T_82545:
   4298 	case WM_T_82546:
   4299 	case WM_T_82571:
   4300 	case WM_T_82572:
   4301 	case WM_T_82573:
   4302 	case WM_T_82574:
   4303 	case WM_T_82575:
   4304 	case WM_T_82576:
   4305 	case WM_T_82583:
   4306 	default:
   4307 		/* Everything else can safely use the documented method. */
   4308 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4309 		break;
   4310 	}
   4311 
   4312 	/* Must release the MDIO ownership after MAC reset */
   4313 	switch (sc->sc_type) {
   4314 	case WM_T_82573:
   4315 	case WM_T_82574:
   4316 	case WM_T_82583:
   4317 		if (error == 0)
   4318 			wm_put_hw_semaphore_82573(sc);
   4319 		break;
   4320 	default:
   4321 		break;
   4322 	}
   4323 
   4324 	if (phy_reset != 0)
   4325 		wm_get_cfg_done(sc);
   4326 
   4327 	/* reload EEPROM */
   4328 	switch (sc->sc_type) {
   4329 	case WM_T_82542_2_0:
   4330 	case WM_T_82542_2_1:
   4331 	case WM_T_82543:
   4332 	case WM_T_82544:
   4333 		delay(10);
   4334 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4335 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4336 		CSR_WRITE_FLUSH(sc);
   4337 		delay(2000);
   4338 		break;
   4339 	case WM_T_82540:
   4340 	case WM_T_82545:
   4341 	case WM_T_82545_3:
   4342 	case WM_T_82546:
   4343 	case WM_T_82546_3:
   4344 		delay(5*1000);
   4345 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4346 		break;
   4347 	case WM_T_82541:
   4348 	case WM_T_82541_2:
   4349 	case WM_T_82547:
   4350 	case WM_T_82547_2:
   4351 		delay(20000);
   4352 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4353 		break;
   4354 	case WM_T_82571:
   4355 	case WM_T_82572:
   4356 	case WM_T_82573:
   4357 	case WM_T_82574:
   4358 	case WM_T_82583:
   4359 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4360 			delay(10);
   4361 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4362 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4363 			CSR_WRITE_FLUSH(sc);
   4364 		}
   4365 		/* check EECD_EE_AUTORD */
   4366 		wm_get_auto_rd_done(sc);
   4367 		/*
   4368 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4369 		 * is set.
   4370 		 */
   4371 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4372 		    || (sc->sc_type == WM_T_82583))
   4373 			delay(25*1000);
   4374 		break;
   4375 	case WM_T_82575:
   4376 	case WM_T_82576:
   4377 	case WM_T_82580:
   4378 	case WM_T_I350:
   4379 	case WM_T_I354:
   4380 	case WM_T_I210:
   4381 	case WM_T_I211:
   4382 	case WM_T_80003:
   4383 		/* check EECD_EE_AUTORD */
   4384 		wm_get_auto_rd_done(sc);
   4385 		break;
   4386 	case WM_T_ICH8:
   4387 	case WM_T_ICH9:
   4388 	case WM_T_ICH10:
   4389 	case WM_T_PCH:
   4390 	case WM_T_PCH2:
   4391 	case WM_T_PCH_LPT:
   4392 	case WM_T_PCH_SPT:
   4393 		break;
   4394 	default:
   4395 		panic("%s: unknown type\n", __func__);
   4396 	}
   4397 
   4398 	/* Check whether EEPROM is present or not */
   4399 	switch (sc->sc_type) {
   4400 	case WM_T_82575:
   4401 	case WM_T_82576:
   4402 	case WM_T_82580:
   4403 	case WM_T_I350:
   4404 	case WM_T_I354:
   4405 	case WM_T_ICH8:
   4406 	case WM_T_ICH9:
   4407 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4408 			/* Not found */
   4409 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4410 			if (sc->sc_type == WM_T_82575)
   4411 				wm_reset_init_script_82575(sc);
   4412 		}
   4413 		break;
   4414 	default:
   4415 		break;
   4416 	}
   4417 
   4418 	if (phy_reset != 0)
   4419 		wm_phy_post_reset(sc);
   4420 
   4421 	if ((sc->sc_type == WM_T_82580)
   4422 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4423 		/* clear global device reset status bit */
   4424 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4425 	}
   4426 
   4427 	/* Clear any pending interrupt events. */
   4428 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4429 	reg = CSR_READ(sc, WMREG_ICR);
   4430 	if (wm_is_using_msix(sc)) {
   4431 		if (sc->sc_type != WM_T_82574) {
   4432 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4433 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4434 		} else
   4435 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4436 	}
   4437 
   4438 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4439 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4440 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4441 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4442 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4443 		reg |= KABGTXD_BGSQLBIAS;
   4444 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4445 	}
   4446 
   4447 	/* reload sc_ctrl */
   4448 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4449 
   4450 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4451 		wm_set_eee_i350(sc);
   4452 
   4453 	/*
   4454 	 * For PCH, this write will make sure that any noise will be detected
   4455 	 * as a CRC error and be dropped rather than show up as a bad packet
   4456 	 * to the DMA engine
   4457 	 */
   4458 	if (sc->sc_type == WM_T_PCH)
   4459 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4460 
   4461 	if (sc->sc_type >= WM_T_82544)
   4462 		CSR_WRITE(sc, WMREG_WUC, 0);
   4463 
   4464 	wm_reset_mdicnfg_82580(sc);
   4465 
   4466 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4467 		wm_pll_workaround_i210(sc);
   4468 }
   4469 
   4470 /*
   4471  * wm_add_rxbuf:
   4472  *
   4473  *	Add a receive buffer to the indicated descriptor.
   4474  */
   4475 static int
   4476 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4477 {
   4478 	struct wm_softc *sc = rxq->rxq_sc;
   4479 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4480 	struct mbuf *m;
   4481 	int error;
   4482 
   4483 	KASSERT(mutex_owned(rxq->rxq_lock));
   4484 
   4485 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4486 	if (m == NULL)
   4487 		return ENOBUFS;
   4488 
   4489 	MCLGET(m, M_DONTWAIT);
   4490 	if ((m->m_flags & M_EXT) == 0) {
   4491 		m_freem(m);
   4492 		return ENOBUFS;
   4493 	}
   4494 
   4495 	if (rxs->rxs_mbuf != NULL)
   4496 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4497 
   4498 	rxs->rxs_mbuf = m;
   4499 
   4500 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4501 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4502 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4503 	if (error) {
   4504 		/* XXX XXX XXX */
   4505 		aprint_error_dev(sc->sc_dev,
   4506 		    "unable to load rx DMA map %d, error = %d\n",
   4507 		    idx, error);
   4508 		panic("wm_add_rxbuf");
   4509 	}
   4510 
   4511 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4512 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4513 
   4514 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4515 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4516 			wm_init_rxdesc(rxq, idx);
   4517 	} else
   4518 		wm_init_rxdesc(rxq, idx);
   4519 
   4520 	return 0;
   4521 }
   4522 
   4523 /*
   4524  * wm_rxdrain:
   4525  *
   4526  *	Drain the receive queue.
   4527  */
   4528 static void
   4529 wm_rxdrain(struct wm_rxqueue *rxq)
   4530 {
   4531 	struct wm_softc *sc = rxq->rxq_sc;
   4532 	struct wm_rxsoft *rxs;
   4533 	int i;
   4534 
   4535 	KASSERT(mutex_owned(rxq->rxq_lock));
   4536 
   4537 	for (i = 0; i < WM_NRXDESC; i++) {
   4538 		rxs = &rxq->rxq_soft[i];
   4539 		if (rxs->rxs_mbuf != NULL) {
   4540 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4541 			m_freem(rxs->rxs_mbuf);
   4542 			rxs->rxs_mbuf = NULL;
   4543 		}
   4544 	}
   4545 }
   4546 
   4547 
   4548 /*
   4549  * XXX copy from FreeBSD's sys/net/rss_config.c
   4550  */
   4551 /*
   4552  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4553  * effectiveness may be limited by algorithm choice and available entropy
   4554  * during the boot.
   4555  *
   4556  * XXXRW: And that we don't randomize it yet!
   4557  *
   4558  * This is the default Microsoft RSS specification key which is also
   4559  * the Chelsio T5 firmware default key.
   4560  */
   4561 #define RSS_KEYSIZE 40
   4562 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4563 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4564 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4565 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4566 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4567 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4568 };
   4569 
   4570 /*
    4571  * The caller must pass an array of size sizeof(wm_rss_key).
    4572  *
    4573  * XXX
    4574  * As if_ixgbe may also use this function, it should not be an
    4575  * if_wm-specific function.
   4576  */
   4577 static void
   4578 wm_rss_getkey(uint8_t *key)
   4579 {
   4580 
   4581 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4582 }
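
/*
 * Note: the 40-byte key exactly fills the RSS random key register file;
 * the CTASSERT in wm_init_rss() below checks that the ten 32-bit RSSRK
 * registers (RSSRK_NUM_REGS * 4 bytes) match RSS_KEYSIZE.
 */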
   4583 
   4584 /*
    4585  * Set up the registers for RSS.
    4586  *
    4587  * XXX VMDq is not supported yet.
   4588  */
   4589 static void
   4590 wm_init_rss(struct wm_softc *sc)
   4591 {
   4592 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4593 	int i;
   4594 
   4595 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4596 
   4597 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4598 		int qid, reta_ent;
   4599 
   4600 		qid  = i % sc->sc_nqueues;
    4601 		switch (sc->sc_type) {
   4602 		case WM_T_82574:
   4603 			reta_ent = __SHIFTIN(qid,
   4604 			    RETA_ENT_QINDEX_MASK_82574);
   4605 			break;
   4606 		case WM_T_82575:
   4607 			reta_ent = __SHIFTIN(qid,
   4608 			    RETA_ENT_QINDEX1_MASK_82575);
   4609 			break;
   4610 		default:
   4611 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4612 			break;
   4613 		}
   4614 
   4615 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4616 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4617 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4618 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4619 	}
   4620 
   4621 	wm_rss_getkey((uint8_t *)rss_key);
   4622 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4623 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4624 
   4625 	if (sc->sc_type == WM_T_82574)
   4626 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4627 	else
   4628 		mrqc = MRQC_ENABLE_RSS_MQ;
   4629 
   4630 	/*
   4631 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4632 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4633 	 */
   4634 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4635 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4636 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4637 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4638 
   4639 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4640 }
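
/*
 * Illustrative sketch of the RETA programming above (hypothetical
 * values): with sc_nqueues == 4, the loop fills the redirection table
 * round-robin, so RSS hash results spread evenly over the queues:
 *
 *	RETA entry: 0 1 2 3 4 5 6 7 ...
 *	queue id:   0 1 2 3 0 1 2 3 ...
 */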
   4641 
   4642 /*
    4643  * Adjust the TX and RX queue numbers which the system actually uses.
    4644  *
    4645  * The numbers are affected by the following parameters:
    4646  *     - The number of hardware queues
   4647  *     - The number of MSI-X vectors (= "nvectors" argument)
   4648  *     - ncpu
   4649  */
   4650 static void
   4651 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4652 {
   4653 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4654 
   4655 	if (nvectors < 2) {
   4656 		sc->sc_nqueues = 1;
   4657 		return;
   4658 	}
   4659 
    4660 	switch (sc->sc_type) {
   4661 	case WM_T_82572:
   4662 		hw_ntxqueues = 2;
   4663 		hw_nrxqueues = 2;
   4664 		break;
   4665 	case WM_T_82574:
   4666 		hw_ntxqueues = 2;
   4667 		hw_nrxqueues = 2;
   4668 		break;
   4669 	case WM_T_82575:
   4670 		hw_ntxqueues = 4;
   4671 		hw_nrxqueues = 4;
   4672 		break;
   4673 	case WM_T_82576:
   4674 		hw_ntxqueues = 16;
   4675 		hw_nrxqueues = 16;
   4676 		break;
   4677 	case WM_T_82580:
   4678 	case WM_T_I350:
   4679 	case WM_T_I354:
   4680 		hw_ntxqueues = 8;
   4681 		hw_nrxqueues = 8;
   4682 		break;
   4683 	case WM_T_I210:
   4684 		hw_ntxqueues = 4;
   4685 		hw_nrxqueues = 4;
   4686 		break;
   4687 	case WM_T_I211:
   4688 		hw_ntxqueues = 2;
   4689 		hw_nrxqueues = 2;
   4690 		break;
   4691 		/*
    4692 		 * As the following Ethernet controllers do not support
    4693 		 * MSI-X, this driver does not use multiqueue on them:
   4694 		 *     - WM_T_80003
   4695 		 *     - WM_T_ICH8
   4696 		 *     - WM_T_ICH9
   4697 		 *     - WM_T_ICH10
   4698 		 *     - WM_T_PCH
   4699 		 *     - WM_T_PCH2
   4700 		 *     - WM_T_PCH_LPT
   4701 		 */
   4702 	default:
   4703 		hw_ntxqueues = 1;
   4704 		hw_nrxqueues = 1;
   4705 		break;
   4706 	}
   4707 
   4708 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4709 
   4710 	/*
    4711 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4712 	 * limit the number of queues actually used.
   4713 	 */
   4714 	if (nvectors < hw_nqueues + 1) {
   4715 		sc->sc_nqueues = nvectors - 1;
   4716 	} else {
   4717 		sc->sc_nqueues = hw_nqueues;
   4718 	}
   4719 
   4720 	/*
    4721 	 * As more queues than CPUs cannot improve scaling, we limit
    4722 	 * the number of queues actually used.
   4723 	 */
   4724 	if (ncpu < sc->sc_nqueues)
   4725 		sc->sc_nqueues = ncpu;
   4726 }
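
/*
 * Worked example (hypothetical configuration): on an 82576
 * (hw_nqueues == 16) with nvectors == 5 and ncpu == 8, the code above
 * picks sc_nqueues = nvectors - 1 = 4, which is also below ncpu; the
 * result is effectively min(hw_nqueues, nvectors - 1, ncpu).
 */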
   4727 
   4728 static inline bool
   4729 wm_is_using_msix(struct wm_softc *sc)
   4730 {
   4731 
   4732 	return (sc->sc_nintrs > 1);
   4733 }
   4734 
   4735 static inline bool
   4736 wm_is_using_multiqueue(struct wm_softc *sc)
   4737 {
   4738 
   4739 	return (sc->sc_nqueues > 1);
   4740 }
   4741 
   4742 static int
   4743 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4744 {
   4745 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4746 	wmq->wmq_id = qidx;
   4747 	wmq->wmq_intr_idx = intr_idx;
   4748 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4749 #ifdef WM_MPSAFE
   4750 	    | SOFTINT_MPSAFE
   4751 #endif
   4752 	    , wm_handle_queue, wmq);
   4753 	if (wmq->wmq_si != NULL)
   4754 		return 0;
   4755 
   4756 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4757 	    wmq->wmq_id);
   4758 
   4759 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4760 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4761 	return ENOMEM;
   4762 }
   4763 
   4764 /*
    4765  * Both single-interrupt MSI and INTx can use this function.
   4766  */
   4767 static int
   4768 wm_setup_legacy(struct wm_softc *sc)
   4769 {
   4770 	pci_chipset_tag_t pc = sc->sc_pc;
   4771 	const char *intrstr = NULL;
   4772 	char intrbuf[PCI_INTRSTR_LEN];
   4773 	int error;
   4774 
   4775 	error = wm_alloc_txrx_queues(sc);
   4776 	if (error) {
   4777 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4778 		    error);
   4779 		return ENOMEM;
   4780 	}
   4781 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4782 	    sizeof(intrbuf));
   4783 #ifdef WM_MPSAFE
   4784 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4785 #endif
   4786 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4787 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4788 	if (sc->sc_ihs[0] == NULL) {
   4789 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4790 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4791 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4792 		return ENOMEM;
   4793 	}
   4794 
   4795 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4796 	sc->sc_nintrs = 1;
   4797 
   4798 	return wm_softint_establish(sc, 0, 0);
   4799 }
   4800 
   4801 static int
   4802 wm_setup_msix(struct wm_softc *sc)
   4803 {
   4804 	void *vih;
   4805 	kcpuset_t *affinity;
   4806 	int qidx, error, intr_idx, txrx_established;
   4807 	pci_chipset_tag_t pc = sc->sc_pc;
   4808 	const char *intrstr = NULL;
   4809 	char intrbuf[PCI_INTRSTR_LEN];
   4810 	char intr_xname[INTRDEVNAMEBUF];
   4811 
   4812 	if (sc->sc_nqueues < ncpu) {
   4813 		/*
    4814 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4815 		 * interrupts starts from CPU#1.
   4816 		 */
   4817 		sc->sc_affinity_offset = 1;
   4818 	} else {
   4819 		/*
    4820 		 * In this case, this device uses all CPUs, so we unify the
    4821 		 * affinity cpu_index with the MSI-X vector number for readability.
   4822 		 */
   4823 		sc->sc_affinity_offset = 0;
   4824 	}
   4825 
   4826 	error = wm_alloc_txrx_queues(sc);
   4827 	if (error) {
   4828 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4829 		    error);
   4830 		return ENOMEM;
   4831 	}
   4832 
   4833 	kcpuset_create(&affinity, false);
   4834 	intr_idx = 0;
   4835 
   4836 	/*
   4837 	 * TX and RX
   4838 	 */
   4839 	txrx_established = 0;
   4840 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4841 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4842 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4843 
   4844 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4845 		    sizeof(intrbuf));
   4846 #ifdef WM_MPSAFE
   4847 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4848 		    PCI_INTR_MPSAFE, true);
   4849 #endif
   4850 		memset(intr_xname, 0, sizeof(intr_xname));
   4851 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4852 		    device_xname(sc->sc_dev), qidx);
   4853 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4854 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4855 		if (vih == NULL) {
   4856 			aprint_error_dev(sc->sc_dev,
   4857 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4858 			    intrstr ? " at " : "",
   4859 			    intrstr ? intrstr : "");
   4860 
   4861 			goto fail;
   4862 		}
   4863 		kcpuset_zero(affinity);
   4864 		/* Round-robin affinity */
   4865 		kcpuset_set(affinity, affinity_to);
   4866 		error = interrupt_distribute(vih, affinity, NULL);
   4867 		if (error == 0) {
   4868 			aprint_normal_dev(sc->sc_dev,
   4869 			    "for TX and RX interrupting at %s affinity to %u\n",
   4870 			    intrstr, affinity_to);
   4871 		} else {
   4872 			aprint_normal_dev(sc->sc_dev,
   4873 			    "for TX and RX interrupting at %s\n", intrstr);
   4874 		}
   4875 		sc->sc_ihs[intr_idx] = vih;
   4876 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4877 			goto fail;
   4878 		txrx_established++;
   4879 		intr_idx++;
   4880 	}
   4881 
   4882 	/*
   4883 	 * LINK
   4884 	 */
   4885 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4886 	    sizeof(intrbuf));
   4887 #ifdef WM_MPSAFE
   4888 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4889 #endif
   4890 	memset(intr_xname, 0, sizeof(intr_xname));
   4891 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4892 	    device_xname(sc->sc_dev));
   4893 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4894 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4895 	if (vih == NULL) {
   4896 		aprint_error_dev(sc->sc_dev,
   4897 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4898 		    intrstr ? " at " : "",
   4899 		    intrstr ? intrstr : "");
   4900 
   4901 		goto fail;
   4902 	}
    4903 	/* Keep the default affinity for the LINK interrupt */
   4904 	aprint_normal_dev(sc->sc_dev,
   4905 	    "for LINK interrupting at %s\n", intrstr);
   4906 	sc->sc_ihs[intr_idx] = vih;
   4907 	sc->sc_link_intr_idx = intr_idx;
   4908 
   4909 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4910 	kcpuset_destroy(affinity);
   4911 	return 0;
   4912 
   4913  fail:
   4914 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4915 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4916 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4917 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4918 	}
   4919 
   4920 	kcpuset_destroy(affinity);
   4921 	return ENOMEM;
   4922 }
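
/*
 * Resulting MSI-X vector layout (a sketch, assuming sc_nqueues == 4):
 *
 *	vectors 0-3: wm_txrxintr_msix, one per queue, with round-robin
 *	             CPU affinity starting at sc_affinity_offset
 *	vector 4:    wm_linkintr_msix, default affinity
 *
 * hence sc_nintrs = sc_nqueues + 1.
 */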
   4923 
   4924 static void
   4925 wm_turnon(struct wm_softc *sc)
   4926 {
   4927 	int i;
   4928 
   4929 	KASSERT(WM_CORE_LOCKED(sc));
   4930 
   4931 	/*
    4932 	 * We must unset the stopping flags in ascending order.
   4933 	 */
    4934 	for (i = 0; i < sc->sc_nqueues; i++) {
   4935 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4936 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4937 
   4938 		mutex_enter(txq->txq_lock);
   4939 		txq->txq_stopping = false;
   4940 		mutex_exit(txq->txq_lock);
   4941 
   4942 		mutex_enter(rxq->rxq_lock);
   4943 		rxq->rxq_stopping = false;
   4944 		mutex_exit(rxq->rxq_lock);
   4945 	}
   4946 
   4947 	sc->sc_core_stopping = false;
   4948 }
   4949 
   4950 static void
   4951 wm_turnoff(struct wm_softc *sc)
   4952 {
   4953 	int i;
   4954 
   4955 	KASSERT(WM_CORE_LOCKED(sc));
   4956 
   4957 	sc->sc_core_stopping = true;
   4958 
   4959 	/*
    4960 	 * We must set the stopping flags in ascending order.
   4961 	 */
    4962 	for (i = 0; i < sc->sc_nqueues; i++) {
   4963 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4964 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4965 
   4966 		mutex_enter(rxq->rxq_lock);
   4967 		rxq->rxq_stopping = true;
   4968 		mutex_exit(rxq->rxq_lock);
   4969 
   4970 		mutex_enter(txq->txq_lock);
   4971 		txq->txq_stopping = true;
   4972 		mutex_exit(txq->txq_lock);
   4973 	}
   4974 }
   4975 
   4976 /*
    4977  * Write the interrupt interval value to the ITR or EITR register.
   4978  */
   4979 static void
   4980 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4981 {
   4982 
   4983 	if (!wmq->wmq_set_itr)
   4984 		return;
   4985 
   4986 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4987 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4988 
   4989 		/*
    4990 		 * The 82575 doesn't have the CNT_INGR field, so
    4991 		 * overwrite the counter field in software.
   4992 		 */
   4993 		if (sc->sc_type == WM_T_82575)
   4994 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4995 		else
   4996 			eitr |= EITR_CNT_INGR;
   4997 
   4998 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4999 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5000 		/*
    5001 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5002 		 * the multiqueue function with MSI-X.
   5003 		 */
   5004 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5005 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5006 	} else {
   5007 		KASSERT(wmq->wmq_id == 0);
   5008 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5009 	}
   5010 
   5011 	wmq->wmq_set_itr = false;
   5012 }
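
/*
 * Summary of the three cases above:
 *
 *	NEWQUEUE (82575 and newer):	WMREG_EITR(vector)
 *	82574 with MSI-X:		WMREG_EITR_82574(vector)
 *	all other controllers:		WMREG_ITR (queue 0 only)
 */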
   5013 
   5014 /*
   5015  * TODO
    5016  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5017  * however, it does not fit wm(4) well. So, AIM is kept disabled until
    5018  * we find an appropriate ITR calculation.
   5019  */
   5020 /*
    5021  * Calculate the interrupt interval value to be written to the register
    5022  * by wm_itrs_writereg(). This function does not write ITR/EITR itself.
   5023  */
   5024 static void
   5025 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5026 {
   5027 #ifdef NOTYET
   5028 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5029 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5030 	uint32_t avg_size = 0;
   5031 	uint32_t new_itr;
   5032 
   5033 	if (rxq->rxq_packets)
   5034 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5035 	if (txq->txq_packets)
   5036 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5037 
   5038 	if (avg_size == 0) {
   5039 		new_itr = 450; /* restore default value */
   5040 		goto out;
   5041 	}
   5042 
   5043 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5044 	avg_size += 24;
   5045 
   5046 	/* Don't starve jumbo frames */
   5047 	avg_size = min(avg_size, 3000);
   5048 
   5049 	/* Give a little boost to mid-size frames */
   5050 	if ((avg_size > 300) && (avg_size < 1200))
   5051 		new_itr = avg_size / 3;
   5052 	else
   5053 		new_itr = avg_size / 2;
   5054 
   5055 out:
   5056 	/*
    5057 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   5058 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5059 	 */
   5060 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5061 		new_itr *= 4;
   5062 
   5063 	if (new_itr != wmq->wmq_itr) {
   5064 		wmq->wmq_itr = new_itr;
   5065 		wmq->wmq_set_itr = true;
   5066 	} else
   5067 		wmq->wmq_set_itr = false;
   5068 
   5069 	rxq->rxq_packets = 0;
   5070 	rxq->rxq_bytes = 0;
   5071 	txq->txq_packets = 0;
   5072 	txq->txq_bytes = 0;
   5073 #endif
   5074 }
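
/*
 * Worked example of the disabled AIM formula above (hypothetical
 * traffic): an average frame of 576 bytes gives avg_size = 600 after
 * the 24-byte overhead is added; that is mid-size, so new_itr =
 * 600 / 3 = 200, scaled by 4 to 800 on everything but the 82575
 * before wm_itrs_writereg() would write it.
 */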
   5075 
   5076 /*
   5077  * wm_init:		[ifnet interface function]
   5078  *
   5079  *	Initialize the interface.
   5080  */
   5081 static int
   5082 wm_init(struct ifnet *ifp)
   5083 {
   5084 	struct wm_softc *sc = ifp->if_softc;
   5085 	int ret;
   5086 
   5087 	WM_CORE_LOCK(sc);
   5088 	ret = wm_init_locked(ifp);
   5089 	WM_CORE_UNLOCK(sc);
   5090 
   5091 	return ret;
   5092 }
   5093 
   5094 static int
   5095 wm_init_locked(struct ifnet *ifp)
   5096 {
   5097 	struct wm_softc *sc = ifp->if_softc;
   5098 	int i, j, trynum, error = 0;
   5099 	uint32_t reg;
   5100 
   5101 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5102 		device_xname(sc->sc_dev), __func__));
   5103 	KASSERT(WM_CORE_LOCKED(sc));
   5104 
   5105 	/*
    5106 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5107 	 * There is a small but measurable benefit to avoiding the adjustment
   5108 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5109 	 * on such platforms.  One possibility is that the DMA itself is
   5110 	 * slightly more efficient if the front of the entire packet (instead
   5111 	 * of the front of the headers) is aligned.
   5112 	 *
   5113 	 * Note we must always set align_tweak to 0 if we are using
   5114 	 * jumbo frames.
   5115 	 */
   5116 #ifdef __NO_STRICT_ALIGNMENT
   5117 	sc->sc_align_tweak = 0;
   5118 #else
   5119 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5120 		sc->sc_align_tweak = 0;
   5121 	else
   5122 		sc->sc_align_tweak = 2;
   5123 #endif /* __NO_STRICT_ALIGNMENT */
   5124 
   5125 	/* Cancel any pending I/O. */
   5126 	wm_stop_locked(ifp, 0);
   5127 
   5128 	/* update statistics before reset */
   5129 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5130 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5131 
   5132 	/* AMT based hardware can now take control from firmware */
   5133 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5134 		wm_get_hw_control(sc);
   5135 
   5136 	/* PCH_SPT hardware workaround */
   5137 	if (sc->sc_type == WM_T_PCH_SPT)
   5138 		wm_flush_desc_rings(sc);
   5139 
   5140 	/* Reset the chip to a known state. */
   5141 	wm_reset(sc);
   5142 
   5143 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5144 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5145 		wm_legacy_irq_quirk_spt(sc);
   5146 
   5147 	/* Init hardware bits */
   5148 	wm_initialize_hardware_bits(sc);
   5149 
   5150 	/* Reset the PHY. */
   5151 	if (sc->sc_flags & WM_F_HAS_MII)
   5152 		wm_gmii_reset(sc);
   5153 
   5154 	/* Calculate (E)ITR value */
   5155 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5156 		/*
   5157 		 * For NEWQUEUE's EITR (except for 82575).
    5158 		 * The 82575's EITR should be set to the same throttling value
    5159 		 * as other old controllers' ITR because the interrupt/sec
    5160 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5161 		 *
    5162 		 * The 82574's EITR should be set to the same throttling value
    5163 		 * as its ITR.
    5164 		 *
    5165 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    5166 		 * in contrast to the ITR throttling value.
   5166 		 */
   5167 		sc->sc_itr_init = 450;
   5168 	} else if (sc->sc_type >= WM_T_82543) {
   5169 		/*
   5170 		 * Set up the interrupt throttling register (units of 256ns)
   5171 		 * Note that a footnote in Intel's documentation says this
   5172 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5173 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5174 		 * that that is also true for the 1024ns units of the other
   5175 		 * interrupt-related timer registers -- so, really, we ought
   5176 		 * to divide this value by 4 when the link speed is low.
   5177 		 *
   5178 		 * XXX implement this division at link speed change!
   5179 		 */
   5180 
   5181 		/*
   5182 		 * For N interrupts/sec, set this value to:
   5183 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5184 		 * absolute and packet timer values to this value
   5185 		 * divided by 4 to get "simple timer" behavior.
   5186 		 */
   5187 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5188 	}
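
	/*
	 * Sanity check of the two formulas above: EITR = 450 corresponds
	 * to about 1,000,000 / 450 ~= 2222 interrupts/sec, in the same
	 * ballpark as the legacy ITR = 1500 setting of
	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec.
	 */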
   5189 
   5190 	error = wm_init_txrx_queues(sc);
   5191 	if (error)
   5192 		goto out;
   5193 
   5194 	/*
   5195 	 * Clear out the VLAN table -- we don't use it (yet).
   5196 	 */
   5197 	CSR_WRITE(sc, WMREG_VET, 0);
   5198 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5199 		trynum = 10; /* Due to hw errata */
   5200 	else
   5201 		trynum = 1;
   5202 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5203 		for (j = 0; j < trynum; j++)
   5204 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5205 
   5206 	/*
   5207 	 * Set up flow-control parameters.
   5208 	 *
   5209 	 * XXX Values could probably stand some tuning.
   5210 	 */
   5211 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5212 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5213 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5214 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5215 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5216 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5217 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5218 	}
   5219 
   5220 	sc->sc_fcrtl = FCRTL_DFLT;
   5221 	if (sc->sc_type < WM_T_82543) {
   5222 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5223 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5224 	} else {
   5225 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5226 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5227 	}
   5228 
   5229 	if (sc->sc_type == WM_T_80003)
   5230 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5231 	else
   5232 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5233 
   5234 	/* Writes the control register. */
   5235 	wm_set_vlan(sc);
   5236 
   5237 	if (sc->sc_flags & WM_F_HAS_MII) {
   5238 		int val;
   5239 
   5240 		switch (sc->sc_type) {
   5241 		case WM_T_80003:
   5242 		case WM_T_ICH8:
   5243 		case WM_T_ICH9:
   5244 		case WM_T_ICH10:
   5245 		case WM_T_PCH:
   5246 		case WM_T_PCH2:
   5247 		case WM_T_PCH_LPT:
   5248 		case WM_T_PCH_SPT:
   5249 			/*
   5250 			 * Set the mac to wait the maximum time between each
   5251 			 * iteration and increase the max iterations when
   5252 			 * polling the phy; this fixes erroneous timeouts at
   5253 			 * 10Mbps.
   5254 			 */
   5255 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5256 			    0xFFFF);
   5257 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5258 			val |= 0x3F;
   5259 			wm_kmrn_writereg(sc,
   5260 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5261 			break;
   5262 		default:
   5263 			break;
   5264 		}
   5265 
   5266 		if (sc->sc_type == WM_T_80003) {
   5267 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5268 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5269 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5270 
   5271 			/* Bypass RX and TX FIFO's */
   5272 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5273 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5274 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5275 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5276 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5277 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5278 		}
   5279 	}
   5280 #if 0
   5281 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5282 #endif
   5283 
   5284 	/* Set up checksum offload parameters. */
   5285 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5286 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5287 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5288 		reg |= RXCSUM_IPOFL;
   5289 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5290 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5291 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5292 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5293 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5294 
   5295 	/* Set registers about MSI-X */
   5296 	if (wm_is_using_msix(sc)) {
   5297 		uint32_t ivar;
   5298 		struct wm_queue *wmq;
   5299 		int qid, qintr_idx;
   5300 
   5301 		if (sc->sc_type == WM_T_82575) {
   5302 			/* Interrupt control */
   5303 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5304 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5305 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5306 
   5307 			/* TX and RX */
   5308 			for (i = 0; i < sc->sc_nqueues; i++) {
   5309 				wmq = &sc->sc_queue[i];
   5310 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5311 				    EITR_TX_QUEUE(wmq->wmq_id)
   5312 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5313 			}
   5314 			/* Link status */
   5315 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5316 			    EITR_OTHER);
   5317 		} else if (sc->sc_type == WM_T_82574) {
   5318 			/* Interrupt control */
   5319 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5320 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5321 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5322 
   5323 			/*
    5324 			 * Work around an issue with spurious interrupts
    5325 			 * in MSI-X mode.
    5326 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5327 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5328 			 */
   5329 			reg = CSR_READ(sc, WMREG_RFCTL);
   5330 			reg |= WMREG_RFCTL_ACKDIS;
   5331 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5332 
   5333 			ivar = 0;
   5334 			/* TX and RX */
   5335 			for (i = 0; i < sc->sc_nqueues; i++) {
   5336 				wmq = &sc->sc_queue[i];
   5337 				qid = wmq->wmq_id;
   5338 				qintr_idx = wmq->wmq_intr_idx;
   5339 
   5340 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5341 				    IVAR_TX_MASK_Q_82574(qid));
   5342 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5343 				    IVAR_RX_MASK_Q_82574(qid));
   5344 			}
   5345 			/* Link status */
   5346 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5347 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5348 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5349 		} else {
   5350 			/* Interrupt control */
   5351 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5352 			    | GPIE_EIAME | GPIE_PBA);
   5353 
   5354 			switch (sc->sc_type) {
   5355 			case WM_T_82580:
   5356 			case WM_T_I350:
   5357 			case WM_T_I354:
   5358 			case WM_T_I210:
   5359 			case WM_T_I211:
   5360 				/* TX and RX */
   5361 				for (i = 0; i < sc->sc_nqueues; i++) {
   5362 					wmq = &sc->sc_queue[i];
   5363 					qid = wmq->wmq_id;
   5364 					qintr_idx = wmq->wmq_intr_idx;
   5365 
   5366 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5367 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5368 					ivar |= __SHIFTIN((qintr_idx
   5369 						| IVAR_VALID),
   5370 					    IVAR_TX_MASK_Q(qid));
   5371 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5372 					ivar |= __SHIFTIN((qintr_idx
   5373 						| IVAR_VALID),
   5374 					    IVAR_RX_MASK_Q(qid));
   5375 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5376 				}
   5377 				break;
   5378 			case WM_T_82576:
   5379 				/* TX and RX */
   5380 				for (i = 0; i < sc->sc_nqueues; i++) {
   5381 					wmq = &sc->sc_queue[i];
   5382 					qid = wmq->wmq_id;
   5383 					qintr_idx = wmq->wmq_intr_idx;
   5384 
   5385 					ivar = CSR_READ(sc,
   5386 					    WMREG_IVAR_Q_82576(qid));
   5387 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5388 					ivar |= __SHIFTIN((qintr_idx
   5389 						| IVAR_VALID),
   5390 					    IVAR_TX_MASK_Q_82576(qid));
   5391 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5392 					ivar |= __SHIFTIN((qintr_idx
   5393 						| IVAR_VALID),
   5394 					    IVAR_RX_MASK_Q_82576(qid));
   5395 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5396 					    ivar);
   5397 				}
   5398 				break;
   5399 			default:
   5400 				break;
   5401 			}
   5402 
   5403 			/* Link status */
   5404 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5405 			    IVAR_MISC_OTHER);
   5406 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5407 		}
   5408 
   5409 		if (wm_is_using_multiqueue(sc)) {
   5410 			wm_init_rss(sc);
   5411 
   5412 			/*
    5413 			 * NOTE: Receive Full-Packet Checksum Offload
    5414 			 * is mutually exclusive with Multiqueue; however,
    5415 			 * this is not the same as TCP/IP checksum offload,
    5416 			 * which still works.
    5417 			 */
   5418 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5419 			reg |= RXCSUM_PCSD;
   5420 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5421 		}
   5422 	}
   5423 
   5424 	/* Set up the interrupt registers. */
   5425 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5426 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5427 	    ICR_RXO | ICR_RXT0;
   5428 	if (wm_is_using_msix(sc)) {
   5429 		uint32_t mask;
   5430 		struct wm_queue *wmq;
   5431 
   5432 		switch (sc->sc_type) {
   5433 		case WM_T_82574:
   5434 			mask = 0;
   5435 			for (i = 0; i < sc->sc_nqueues; i++) {
   5436 				wmq = &sc->sc_queue[i];
   5437 				mask |= ICR_TXQ(wmq->wmq_id);
   5438 				mask |= ICR_RXQ(wmq->wmq_id);
   5439 			}
   5440 			mask |= ICR_OTHER;
   5441 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5442 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5443 			break;
   5444 		default:
   5445 			if (sc->sc_type == WM_T_82575) {
   5446 				mask = 0;
   5447 				for (i = 0; i < sc->sc_nqueues; i++) {
   5448 					wmq = &sc->sc_queue[i];
   5449 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5450 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5451 				}
   5452 				mask |= EITR_OTHER;
   5453 			} else {
   5454 				mask = 0;
   5455 				for (i = 0; i < sc->sc_nqueues; i++) {
   5456 					wmq = &sc->sc_queue[i];
   5457 					mask |= 1 << wmq->wmq_intr_idx;
   5458 				}
   5459 				mask |= 1 << sc->sc_link_intr_idx;
   5460 			}
   5461 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5462 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5463 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5464 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5465 			break;
   5466 		}
   5467 	} else
   5468 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5469 
   5470 	/* Set up the inter-packet gap. */
   5471 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5472 
   5473 	if (sc->sc_type >= WM_T_82543) {
   5474 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5475 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5476 			wm_itrs_writereg(sc, wmq);
   5477 		}
   5478 		/*
    5479 		 * Link interrupts occur much less frequently than TX
    5480 		 * and RX interrupts. So, we don't tune the
    5481 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5482 		 * FreeBSD's if_igb does.
   5483 		 */
   5484 	}
   5485 
   5486 	/* Set the VLAN ethernetype. */
   5487 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5488 
   5489 	/*
   5490 	 * Set up the transmit control register; we start out with
    5491 	 * a collision distance suitable for FDX, but update it when
   5492 	 * we resolve the media type.
   5493 	 */
   5494 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5495 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5496 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5497 	if (sc->sc_type >= WM_T_82571)
   5498 		sc->sc_tctl |= TCTL_MULR;
   5499 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5500 
   5501 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5502 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5503 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5504 	}
   5505 
   5506 	if (sc->sc_type == WM_T_80003) {
   5507 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5508 		reg &= ~TCTL_EXT_GCEX_MASK;
   5509 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5510 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5511 	}
   5512 
   5513 	/* Set the media. */
   5514 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5515 		goto out;
   5516 
   5517 	/* Configure for OS presence */
   5518 	wm_init_manageability(sc);
   5519 
   5520 	/*
   5521 	 * Set up the receive control register; we actually program
   5522 	 * the register when we set the receive filter.  Use multicast
   5523 	 * address offset type 0.
   5524 	 *
   5525 	 * Only the i82544 has the ability to strip the incoming
   5526 	 * CRC, so we don't enable that feature.
   5527 	 */
   5528 	sc->sc_mchash_type = 0;
   5529 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5530 	    | RCTL_MO(sc->sc_mchash_type);
   5531 
   5532 	/*
    5533 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5534 	 */
   5535 	if (sc->sc_type == WM_T_82574)
   5536 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5537 
   5538 	/*
   5539 	 * The I350 has a bug where it always strips the CRC whether
    5540 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   5541 	 */
   5542 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5543 	    || (sc->sc_type == WM_T_I210))
   5544 		sc->sc_rctl |= RCTL_SECRC;
   5545 
   5546 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5547 	    && (ifp->if_mtu > ETHERMTU)) {
   5548 		sc->sc_rctl |= RCTL_LPE;
   5549 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5550 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5551 	}
   5552 
   5553 	if (MCLBYTES == 2048) {
   5554 		sc->sc_rctl |= RCTL_2k;
   5555 	} else {
   5556 		if (sc->sc_type >= WM_T_82543) {
   5557 			switch (MCLBYTES) {
   5558 			case 4096:
   5559 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5560 				break;
   5561 			case 8192:
   5562 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5563 				break;
   5564 			case 16384:
   5565 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5566 				break;
   5567 			default:
   5568 				panic("wm_init: MCLBYTES %d unsupported",
   5569 				    MCLBYTES);
   5570 				break;
   5571 			}
   5572 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5573 	}
   5574 
   5575 	/* Enable ECC */
   5576 	switch (sc->sc_type) {
   5577 	case WM_T_82571:
   5578 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5579 		reg |= PBA_ECC_CORR_EN;
   5580 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5581 		break;
   5582 	case WM_T_PCH_LPT:
   5583 	case WM_T_PCH_SPT:
   5584 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5585 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5586 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5587 
   5588 		sc->sc_ctrl |= CTRL_MEHE;
   5589 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5590 		break;
   5591 	default:
   5592 		break;
   5593 	}
   5594 
    5595 	/* On 82575 and later, set RDT only if RX is enabled */
   5596 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5597 		int qidx;
   5598 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5599 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5600 			for (i = 0; i < WM_NRXDESC; i++) {
   5601 				mutex_enter(rxq->rxq_lock);
   5602 				wm_init_rxdesc(rxq, i);
   5603 				mutex_exit(rxq->rxq_lock);
   5604 
   5605 			}
   5606 		}
   5607 	}
   5608 
   5609 	/* Set the receive filter. */
   5610 	wm_set_filter(sc);
   5611 
   5612 	wm_turnon(sc);
   5613 
   5614 	/* Start the one second link check clock. */
   5615 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5616 
   5617 	/* ...all done! */
   5618 	ifp->if_flags |= IFF_RUNNING;
   5619 	ifp->if_flags &= ~IFF_OACTIVE;
   5620 
   5621  out:
   5622 	sc->sc_if_flags = ifp->if_flags;
   5623 	if (error)
   5624 		log(LOG_ERR, "%s: interface not running\n",
   5625 		    device_xname(sc->sc_dev));
   5626 	return error;
   5627 }
   5628 
   5629 /*
   5630  * wm_stop:		[ifnet interface function]
   5631  *
   5632  *	Stop transmission on the interface.
   5633  */
   5634 static void
   5635 wm_stop(struct ifnet *ifp, int disable)
   5636 {
   5637 	struct wm_softc *sc = ifp->if_softc;
   5638 
   5639 	WM_CORE_LOCK(sc);
   5640 	wm_stop_locked(ifp, disable);
   5641 	WM_CORE_UNLOCK(sc);
   5642 }
   5643 
   5644 static void
   5645 wm_stop_locked(struct ifnet *ifp, int disable)
   5646 {
   5647 	struct wm_softc *sc = ifp->if_softc;
   5648 	struct wm_txsoft *txs;
   5649 	int i, qidx;
   5650 
   5651 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5652 		device_xname(sc->sc_dev), __func__));
   5653 	KASSERT(WM_CORE_LOCKED(sc));
   5654 
   5655 	wm_turnoff(sc);
   5656 
   5657 	/* Stop the one second clock. */
   5658 	callout_stop(&sc->sc_tick_ch);
   5659 
   5660 	/* Stop the 82547 Tx FIFO stall check timer. */
   5661 	if (sc->sc_type == WM_T_82547)
   5662 		callout_stop(&sc->sc_txfifo_ch);
   5663 
   5664 	if (sc->sc_flags & WM_F_HAS_MII) {
   5665 		/* Down the MII. */
   5666 		mii_down(&sc->sc_mii);
   5667 	} else {
   5668 #if 0
   5669 		/* Should we clear PHY's status properly? */
   5670 		wm_reset(sc);
   5671 #endif
   5672 	}
   5673 
   5674 	/* Stop the transmit and receive processes. */
   5675 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5676 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5677 	sc->sc_rctl &= ~RCTL_EN;
   5678 
   5679 	/*
   5680 	 * Clear the interrupt mask to ensure the device cannot assert its
   5681 	 * interrupt line.
   5682 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5683 	 * service any currently pending or shared interrupt.
   5684 	 */
   5685 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5686 	sc->sc_icr = 0;
   5687 	if (wm_is_using_msix(sc)) {
   5688 		if (sc->sc_type != WM_T_82574) {
   5689 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5690 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5691 		} else
   5692 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5693 	}
   5694 
   5695 	/* Release any queued transmit buffers. */
   5696 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5697 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5698 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5699 		mutex_enter(txq->txq_lock);
   5700 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5701 			txs = &txq->txq_soft[i];
   5702 			if (txs->txs_mbuf != NULL) {
   5703 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5704 				m_freem(txs->txs_mbuf);
   5705 				txs->txs_mbuf = NULL;
   5706 			}
   5707 		}
   5708 		mutex_exit(txq->txq_lock);
   5709 	}
   5710 
   5711 	/* Mark the interface as down and cancel the watchdog timer. */
   5712 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5713 	ifp->if_timer = 0;
   5714 
   5715 	if (disable) {
   5716 		for (i = 0; i < sc->sc_nqueues; i++) {
   5717 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5718 			mutex_enter(rxq->rxq_lock);
   5719 			wm_rxdrain(rxq);
   5720 			mutex_exit(rxq->rxq_lock);
   5721 		}
   5722 	}
   5723 
   5724 #if 0 /* notyet */
   5725 	if (sc->sc_type >= WM_T_82544)
   5726 		CSR_WRITE(sc, WMREG_WUC, 0);
   5727 #endif
   5728 }
   5729 
   5730 static void
   5731 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5732 {
   5733 	struct mbuf *m;
   5734 	int i;
   5735 
   5736 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5737 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5738 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5739 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5740 		    m->m_data, m->m_len, m->m_flags);
   5741 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5742 	    i, i == 1 ? "" : "s");
   5743 }
   5744 
   5745 /*
   5746  * wm_82547_txfifo_stall:
   5747  *
   5748  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5749  *	reset the FIFO pointers, and restart packet transmission.
   5750  */
   5751 static void
   5752 wm_82547_txfifo_stall(void *arg)
   5753 {
   5754 	struct wm_softc *sc = arg;
   5755 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5756 
   5757 	mutex_enter(txq->txq_lock);
   5758 
   5759 	if (txq->txq_stopping)
   5760 		goto out;
   5761 
   5762 	if (txq->txq_fifo_stall) {
   5763 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5764 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5765 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5766 			/*
   5767 			 * Packets have drained.  Stop transmitter, reset
   5768 			 * FIFO pointers, restart transmitter, and kick
   5769 			 * the packet queue.
   5770 			 */
   5771 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5772 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5773 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5774 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5775 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5776 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5777 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5778 			CSR_WRITE_FLUSH(sc);
   5779 
   5780 			txq->txq_fifo_head = 0;
   5781 			txq->txq_fifo_stall = 0;
   5782 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5783 		} else {
   5784 			/*
   5785 			 * Still waiting for packets to drain; try again in
   5786 			 * another tick.
   5787 			 */
   5788 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5789 		}
   5790 	}
   5791 
   5792 out:
   5793 	mutex_exit(txq->txq_lock);
   5794 }
   5795 
   5796 /*
   5797  * wm_82547_txfifo_bugchk:
   5798  *
    5799  *	Check for a bug condition in the 82547 Tx FIFO.  We need to
    5800  *	prevent enqueueing a packet that would wrap around the end
    5801  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5802  *
   5803  *	We do this by checking the amount of space before the end
   5804  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5805  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5806  *	the internal FIFO pointers to the beginning, and restart
   5807  *	transmission on the interface.
   5808  */
   5809 #define	WM_FIFO_HDR		0x10
   5810 #define	WM_82547_PAD_LEN	0x3e0
   5811 static int
   5812 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5813 {
   5814 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5815 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5816 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5817 
   5818 	/* Just return if already stalled. */
   5819 	if (txq->txq_fifo_stall)
   5820 		return 1;
   5821 
   5822 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5823 		/* Stall only occurs in half-duplex mode. */
   5824 		goto send_packet;
   5825 	}
   5826 
   5827 	if (len >= WM_82547_PAD_LEN + space) {
   5828 		txq->txq_fifo_stall = 1;
   5829 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5830 		return 1;
   5831 	}
   5832 
   5833  send_packet:
   5834 	txq->txq_fifo_head += len;
   5835 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5836 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5837 
   5838 	return 0;
   5839 }
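
/*
 * Worked example (hypothetical numbers, half-duplex link): with
 * txq_fifo_size = 0x8000 and txq_fifo_head = 0x7e00, a 1514-byte
 * packet rounds up to len = 0x600 while space = 0x200; since
 * 0x600 >= WM_82547_PAD_LEN + space (0x3e0 + 0x200 = 0x5e0), the
 * packet would wrap, so the queue is stalled until the FIFO drains.
 */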
   5840 
   5841 static int
   5842 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5843 {
   5844 	int error;
   5845 
   5846 	/*
   5847 	 * Allocate the control data structures, and create and load the
   5848 	 * DMA map for it.
   5849 	 *
   5850 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5851 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5852 	 * both sets within the same 4G segment.
   5853 	 */
   5854 	if (sc->sc_type < WM_T_82544)
   5855 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5856 	else
   5857 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5858 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5859 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5860 	else
   5861 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5862 
   5863 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5864 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5865 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5866 		aprint_error_dev(sc->sc_dev,
   5867 		    "unable to allocate TX control data, error = %d\n",
   5868 		    error);
   5869 		goto fail_0;
   5870 	}
   5871 
   5872 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5873 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5874 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5875 		aprint_error_dev(sc->sc_dev,
   5876 		    "unable to map TX control data, error = %d\n", error);
   5877 		goto fail_1;
   5878 	}
   5879 
   5880 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5881 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5882 		aprint_error_dev(sc->sc_dev,
   5883 		    "unable to create TX control data DMA map, error = %d\n",
   5884 		    error);
   5885 		goto fail_2;
   5886 	}
   5887 
   5888 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5889 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5890 		aprint_error_dev(sc->sc_dev,
   5891 		    "unable to load TX control data DMA map, error = %d\n",
   5892 		    error);
   5893 		goto fail_3;
   5894 	}
   5895 
   5896 	return 0;
   5897 
   5898  fail_3:
   5899 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5900  fail_2:
   5901 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5902 	    WM_TXDESCS_SIZE(txq));
   5903  fail_1:
   5904 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5905  fail_0:
   5906 	return error;
   5907 }
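
/*
 * Note on the allocation above: the (bus_size_t)0x100000000ULL passed
 * as the "boundary" argument of bus_dmamem_alloc() is what keeps the
 * descriptor ring from crossing a 4G boundary, satisfying the
 * same-4G-segment requirement described at the top of this function.
 */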
   5908 
   5909 static void
   5910 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5911 {
   5912 
   5913 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5914 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5915 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5916 	    WM_TXDESCS_SIZE(txq));
   5917 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5918 }
   5919 
   5920 static int
   5921 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5922 {
   5923 	int error;
   5924 	size_t rxq_descs_size;
   5925 
   5926 	/*
   5927 	 * Allocate the control data structures, and create and load the
   5928 	 * DMA map for it.
   5929 	 *
   5930 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5931 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5932 	 * both sets within the same 4G segment.
   5933 	 */
   5934 	rxq->rxq_ndesc = WM_NRXDESC;
   5935 	if (sc->sc_type == WM_T_82574)
   5936 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5937 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5938 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5939 	else
   5940 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5941 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5942 
   5943 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5944 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5945 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5946 		aprint_error_dev(sc->sc_dev,
   5947 		    "unable to allocate RX control data, error = %d\n",
   5948 		    error);
   5949 		goto fail_0;
   5950 	}
   5951 
   5952 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5953 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5954 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5955 		aprint_error_dev(sc->sc_dev,
   5956 		    "unable to map RX control data, error = %d\n", error);
   5957 		goto fail_1;
   5958 	}
   5959 
   5960 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5961 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5962 		aprint_error_dev(sc->sc_dev,
   5963 		    "unable to create RX control data DMA map, error = %d\n",
   5964 		    error);
   5965 		goto fail_2;
   5966 	}
   5967 
   5968 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5969 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5970 		aprint_error_dev(sc->sc_dev,
   5971 		    "unable to load RX control data DMA map, error = %d\n",
   5972 		    error);
   5973 		goto fail_3;
   5974 	}
   5975 
   5976 	return 0;
   5977 
   5978  fail_3:
   5979 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5980  fail_2:
   5981 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5982 	    rxq_descs_size);
   5983  fail_1:
   5984 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5985  fail_0:
   5986 	return error;
   5987 }
   5988 
   5989 static void
   5990 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5991 {
   5992 
   5993 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5994 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5995 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5996 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5997 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5998 }
   5999 
   6000 
   6001 static int
   6002 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6003 {
   6004 	int i, error;
   6005 
   6006 	/* Create the transmit buffer DMA maps. */
   6007 	WM_TXQUEUELEN(txq) =
   6008 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6009 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6010 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6011 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6012 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6013 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6014 			aprint_error_dev(sc->sc_dev,
   6015 			    "unable to create Tx DMA map %d, error = %d\n",
   6016 			    i, error);
   6017 			goto fail;
   6018 		}
   6019 	}
   6020 
   6021 	return 0;
   6022 
   6023  fail:
   6024 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6025 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6026 			bus_dmamap_destroy(sc->sc_dmat,
   6027 			    txq->txq_soft[i].txs_dmamap);
   6028 	}
   6029 	return error;
   6030 }
   6031 
   6032 static void
   6033 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6034 {
   6035 	int i;
   6036 
   6037 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6038 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6039 			bus_dmamap_destroy(sc->sc_dmat,
   6040 			    txq->txq_soft[i].txs_dmamap);
   6041 	}
   6042 }
   6043 
   6044 static int
   6045 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6046 {
   6047 	int i, error;
   6048 
   6049 	/* Create the receive buffer DMA maps. */
   6050 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6051 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6052 			    MCLBYTES, 0, 0,
   6053 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6054 			aprint_error_dev(sc->sc_dev,
   6055 			    "unable to create Rx DMA map %d error = %d\n",
   6056 			    i, error);
   6057 			goto fail;
   6058 		}
   6059 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6060 	}
   6061 
   6062 	return 0;
   6063 
   6064  fail:
   6065 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6066 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6067 			bus_dmamap_destroy(sc->sc_dmat,
   6068 			    rxq->rxq_soft[i].rxs_dmamap);
   6069 	}
   6070 	return error;
   6071 }
   6072 
   6073 static void
   6074 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6075 {
   6076 	int i;
   6077 
   6078 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6079 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6080 			bus_dmamap_destroy(sc->sc_dmat,
   6081 			    rxq->rxq_soft[i].rxs_dmamap);
   6082 	}
   6083 }
   6084 
   6085 /*
    6086  * wm_alloc_txrx_queues:
    6087  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6088  */
   6089 static int
   6090 wm_alloc_txrx_queues(struct wm_softc *sc)
   6091 {
   6092 	int i, error, tx_done, rx_done;
   6093 
   6094 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6095 	    KM_SLEEP);
   6096 	if (sc->sc_queue == NULL) {
   6097 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6098 		error = ENOMEM;
   6099 		goto fail_0;
   6100 	}
   6101 
   6102 	/*
   6103 	 * For transmission
   6104 	 */
   6105 	error = 0;
   6106 	tx_done = 0;
   6107 	for (i = 0; i < sc->sc_nqueues; i++) {
   6108 #ifdef WM_EVENT_COUNTERS
   6109 		int j;
   6110 		const char *xname;
   6111 #endif
   6112 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6113 		txq->txq_sc = sc;
   6114 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6115 
   6116 		error = wm_alloc_tx_descs(sc, txq);
   6117 		if (error)
   6118 			break;
   6119 		error = wm_alloc_tx_buffer(sc, txq);
   6120 		if (error) {
   6121 			wm_free_tx_descs(sc, txq);
   6122 			break;
   6123 		}
   6124 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6125 		if (txq->txq_interq == NULL) {
   6126 			wm_free_tx_descs(sc, txq);
   6127 			wm_free_tx_buffer(sc, txq);
   6128 			error = ENOMEM;
   6129 			break;
   6130 		}
   6131 
   6132 #ifdef WM_EVENT_COUNTERS
   6133 		xname = device_xname(sc->sc_dev);
   6134 
   6135 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6136 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6137 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6138 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6139 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6140 
   6141 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6142 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6143 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6144 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6145 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6146 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6147 
   6148 		for (j = 0; j < WM_NTXSEGS; j++) {
   6149 			snprintf(txq->txq_txseg_evcnt_names[j],
   6150 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6151 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6152 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6153 		}
   6154 
   6155 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6156 
   6157 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6158 #endif /* WM_EVENT_COUNTERS */
   6159 
   6160 		tx_done++;
   6161 	}
   6162 	if (error)
   6163 		goto fail_1;
   6164 
   6165 	/*
    6166 	 * For receive
   6167 	 */
   6168 	error = 0;
   6169 	rx_done = 0;
   6170 	for (i = 0; i < sc->sc_nqueues; i++) {
   6171 #ifdef WM_EVENT_COUNTERS
   6172 		const char *xname;
   6173 #endif
   6174 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6175 		rxq->rxq_sc = sc;
   6176 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6177 
   6178 		error = wm_alloc_rx_descs(sc, rxq);
   6179 		if (error)
   6180 			break;
   6181 
   6182 		error = wm_alloc_rx_buffer(sc, rxq);
   6183 		if (error) {
   6184 			wm_free_rx_descs(sc, rxq);
   6185 			break;
   6186 		}
   6187 
   6188 #ifdef WM_EVENT_COUNTERS
   6189 		xname = device_xname(sc->sc_dev);
   6190 
   6191 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6192 
   6193 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6194 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6195 #endif /* WM_EVENT_COUNTERS */
   6196 
   6197 		rx_done++;
   6198 	}
   6199 	if (error)
   6200 		goto fail_2;
   6201 
   6202 	return 0;
   6203 
   6204  fail_2:
   6205 	for (i = 0; i < rx_done; i++) {
   6206 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6207 		wm_free_rx_buffer(sc, rxq);
   6208 		wm_free_rx_descs(sc, rxq);
   6209 		if (rxq->rxq_lock)
   6210 			mutex_obj_free(rxq->rxq_lock);
   6211 	}
   6212  fail_1:
   6213 	for (i = 0; i < tx_done; i++) {
   6214 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6215 		pcq_destroy(txq->txq_interq);
   6216 		wm_free_tx_buffer(sc, txq);
   6217 		wm_free_tx_descs(sc, txq);
   6218 		if (txq->txq_lock)
   6219 			mutex_obj_free(txq->txq_lock);
   6220 	}
   6221 
   6222 	kmem_free(sc->sc_queue,
   6223 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6224  fail_0:
   6225 	return error;
   6226 }
   6227 
   6228 /*
    6229  * wm_free_txrx_queues:
    6230  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6231  */
   6232 static void
   6233 wm_free_txrx_queues(struct wm_softc *sc)
   6234 {
   6235 	int i;
   6236 
   6237 	for (i = 0; i < sc->sc_nqueues; i++) {
   6238 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6239 
   6240 #ifdef WM_EVENT_COUNTERS
   6241 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6242 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6243 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6244 #endif /* WM_EVENT_COUNTERS */
   6245 
   6246 		wm_free_rx_buffer(sc, rxq);
   6247 		wm_free_rx_descs(sc, rxq);
   6248 		if (rxq->rxq_lock)
   6249 			mutex_obj_free(rxq->rxq_lock);
   6250 	}
   6251 
   6252 	for (i = 0; i < sc->sc_nqueues; i++) {
   6253 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6254 		struct mbuf *m;
   6255 #ifdef WM_EVENT_COUNTERS
   6256 		int j;
   6257 
   6258 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6259 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6260 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6261 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6262 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6263 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6264 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6265 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6266 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6267 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6268 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6269 
   6270 		for (j = 0; j < WM_NTXSEGS; j++)
   6271 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6272 
   6273 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6274 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6275 #endif /* WM_EVENT_COUNTERS */
   6276 
   6277 		/* drain txq_interq */
   6278 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6279 			m_freem(m);
   6280 		pcq_destroy(txq->txq_interq);
   6281 
   6282 		wm_free_tx_buffer(sc, txq);
   6283 		wm_free_tx_descs(sc, txq);
   6284 		if (txq->txq_lock)
   6285 			mutex_obj_free(txq->txq_lock);
   6286 	}
   6287 
   6288 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6289 }
   6290 
   6291 static void
   6292 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6293 {
   6294 
   6295 	KASSERT(mutex_owned(txq->txq_lock));
   6296 
   6297 	/* Initialize the transmit descriptor ring. */
   6298 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6299 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6300 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6301 	txq->txq_free = WM_NTXDESC(txq);
   6302 	txq->txq_next = 0;
   6303 }
   6304 
   6305 static void
   6306 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6307     struct wm_txqueue *txq)
   6308 {
   6309 
   6310 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6311 		device_xname(sc->sc_dev), __func__));
   6312 	KASSERT(mutex_owned(txq->txq_lock));
   6313 
   6314 	if (sc->sc_type < WM_T_82543) {
   6315 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6316 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6317 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6318 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6319 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6320 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6321 	} else {
   6322 		int qid = wmq->wmq_id;
   6323 
   6324 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6325 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6326 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6327 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6328 
   6329 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6330 			/*
   6331 			 * Don't write TDT before TCTL.EN is set.
    6332 			 * See the datasheet.
   6333 			 */
   6334 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6335 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6336 			    | TXDCTL_WTHRESH(0));
   6337 		else {
   6338 			/* XXX should update with AIM? */
   6339 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6340 			if (sc->sc_type >= WM_T_82540) {
    6341 				/* Should be the same value as TIDV */
   6342 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6343 			}
   6344 
   6345 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6346 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6347 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6348 		}
   6349 	}
   6350 }
   6351 
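/*
 * Illustrative note on the "wmq_itr / 4" writes above (an assumption,
 * not stated in this file): the ITR/EITR interval is programmed in
 * 256 ns units while TIDV/TADV count in 1.024 us units, so dividing
 * by 4 converts between the two, e.g. wmq_itr == 400 (~102 us)
 * yields TIDV = 100 (also ~102 us).
 */
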
   6352 static void
   6353 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6354 {
   6355 	int i;
   6356 
   6357 	KASSERT(mutex_owned(txq->txq_lock));
   6358 
   6359 	/* Initialize the transmit job descriptors. */
   6360 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6361 		txq->txq_soft[i].txs_mbuf = NULL;
   6362 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6363 	txq->txq_snext = 0;
   6364 	txq->txq_sdirty = 0;
   6365 }
   6366 
   6367 static void
   6368 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6369     struct wm_txqueue *txq)
   6370 {
   6371 
   6372 	KASSERT(mutex_owned(txq->txq_lock));
   6373 
   6374 	/*
   6375 	 * Set up some register offsets that are different between
   6376 	 * the i82542 and the i82543 and later chips.
   6377 	 */
   6378 	if (sc->sc_type < WM_T_82543)
   6379 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6380 	else
   6381 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6382 
   6383 	wm_init_tx_descs(sc, txq);
   6384 	wm_init_tx_regs(sc, wmq, txq);
   6385 	wm_init_tx_buffer(sc, txq);
   6386 }
   6387 
   6388 static void
   6389 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6390     struct wm_rxqueue *rxq)
   6391 {
   6392 
   6393 	KASSERT(mutex_owned(rxq->rxq_lock));
   6394 
   6395 	/*
   6396 	 * Initialize the receive descriptor and receive job
   6397 	 * descriptor rings.
   6398 	 */
   6399 	if (sc->sc_type < WM_T_82543) {
   6400 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6401 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6402 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6403 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6404 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6405 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6406 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6407 
   6408 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6409 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6410 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6411 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6412 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6413 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6414 	} else {
   6415 		int qid = wmq->wmq_id;
   6416 
   6417 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6418 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6419 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6420 
   6421 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6422 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6423 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6424 
    6425 		/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6426 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6427 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6428 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6429 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6430 			    | RXDCTL_WTHRESH(1));
   6431 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6432 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6433 		} else {
   6434 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6435 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6436 			/* XXX should update with AIM? */
   6437 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6438 			/* MUST be the same value as RDTR */
   6439 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6440 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6441 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6442 		}
   6443 	}
   6444 }
   6445 
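/*
 * Worked example for the SRRCTL setup above, assuming the usual
 * SRRCTL_BSIZEPKT_SHIFT of 10 (i.e. the BSIZEPKT field is in 1 KB
 * units): with MCLBYTES == 2048 the sanity check passes because
 * 2048 & ((1 << 10) - 1) == 0, and the field written is
 * 2048 >> 10 == 2, i.e. 2 KB receive buffers.
 */
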
   6446 static int
   6447 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6448 {
   6449 	struct wm_rxsoft *rxs;
   6450 	int error, i;
   6451 
   6452 	KASSERT(mutex_owned(rxq->rxq_lock));
   6453 
   6454 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6455 		rxs = &rxq->rxq_soft[i];
   6456 		if (rxs->rxs_mbuf == NULL) {
   6457 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6458 				log(LOG_ERR, "%s: unable to allocate or map "
   6459 				    "rx buffer %d, error = %d\n",
   6460 				    device_xname(sc->sc_dev), i, error);
   6461 				/*
   6462 				 * XXX Should attempt to run with fewer receive
   6463 				 * XXX buffers instead of just failing.
   6464 				 */
   6465 				wm_rxdrain(rxq);
   6466 				return ENOMEM;
   6467 			}
   6468 		} else {
   6469 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6470 				wm_init_rxdesc(rxq, i);
   6471 			/*
    6472 			 * For 82575 and newer devices, the RX descriptors
    6473 			 * must be initialized after RCTL.EN is set in
    6474 			 * wm_set_filter().
   6475 			 */
   6476 		}
   6477 	}
   6478 	rxq->rxq_ptr = 0;
   6479 	rxq->rxq_discard = 0;
   6480 	WM_RXCHAIN_RESET(rxq);
   6481 
   6482 	return 0;
   6483 }
   6484 
   6485 static int
   6486 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6487     struct wm_rxqueue *rxq)
   6488 {
   6489 
   6490 	KASSERT(mutex_owned(rxq->rxq_lock));
   6491 
   6492 	/*
   6493 	 * Set up some register offsets that are different between
   6494 	 * the i82542 and the i82543 and later chips.
   6495 	 */
   6496 	if (sc->sc_type < WM_T_82543)
   6497 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6498 	else
   6499 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6500 
   6501 	wm_init_rx_regs(sc, wmq, rxq);
   6502 	return wm_init_rx_buffer(sc, rxq);
   6503 }
   6504 
   6505 /*
    6506  * wm_init_txrx_queues:
   6507  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6508  */
   6509 static int
   6510 wm_init_txrx_queues(struct wm_softc *sc)
   6511 {
   6512 	int i, error = 0;
   6513 
   6514 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6515 		device_xname(sc->sc_dev), __func__));
   6516 
   6517 	for (i = 0; i < sc->sc_nqueues; i++) {
   6518 		struct wm_queue *wmq = &sc->sc_queue[i];
   6519 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6520 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6521 
   6522 		/*
   6523 		 * TODO
    6524 		 * Currently, a constant value is used instead of AIM.
    6525 		 * Furthermore, for multiqueue (which uses polling mode),
    6526 		 * the interrupt interval is set below the default value.
    6527 		 * More tuning and AIM support are required.
   6528 		 */
   6529 		if (wm_is_using_multiqueue(sc))
   6530 			wmq->wmq_itr = 50;
   6531 		else
   6532 			wmq->wmq_itr = sc->sc_itr_init;
   6533 		wmq->wmq_set_itr = true;
   6534 
   6535 		mutex_enter(txq->txq_lock);
   6536 		wm_init_tx_queue(sc, wmq, txq);
   6537 		mutex_exit(txq->txq_lock);
   6538 
   6539 		mutex_enter(rxq->rxq_lock);
   6540 		error = wm_init_rx_queue(sc, wmq, rxq);
   6541 		mutex_exit(rxq->rxq_lock);
   6542 		if (error)
   6543 			break;
   6544 	}
   6545 
   6546 	return error;
   6547 }
   6548 
   6549 /*
   6550  * wm_tx_offload:
   6551  *
   6552  *	Set up TCP/IP checksumming parameters for the
   6553  *	specified packet.
   6554  */
   6555 static int
   6556 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6557     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6558 {
   6559 	struct mbuf *m0 = txs->txs_mbuf;
   6560 	struct livengood_tcpip_ctxdesc *t;
   6561 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6562 	uint32_t ipcse;
   6563 	struct ether_header *eh;
   6564 	int offset, iphl;
   6565 	uint8_t fields;
   6566 
   6567 	/*
   6568 	 * XXX It would be nice if the mbuf pkthdr had offset
   6569 	 * fields for the protocol headers.
   6570 	 */
   6571 
   6572 	eh = mtod(m0, struct ether_header *);
   6573 	switch (htons(eh->ether_type)) {
   6574 	case ETHERTYPE_IP:
   6575 	case ETHERTYPE_IPV6:
   6576 		offset = ETHER_HDR_LEN;
   6577 		break;
   6578 
   6579 	case ETHERTYPE_VLAN:
   6580 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6581 		break;
   6582 
   6583 	default:
   6584 		/*
   6585 		 * Don't support this protocol or encapsulation.
   6586 		 */
   6587 		*fieldsp = 0;
   6588 		*cmdp = 0;
   6589 		return 0;
   6590 	}
   6591 
   6592 	if ((m0->m_pkthdr.csum_flags &
   6593 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6594 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6595 	} else {
   6596 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6597 	}
   6598 	ipcse = offset + iphl - 1;
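	/*
	 * E.g. an untagged IPv4 packet with a 20-byte IP header has
	 * offset == 14 and iphl == 20, so ipcse == 33: the offset of
	 * the last byte covered by the IP checksum (IPCSE is inclusive).
	 */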
   6599 
   6600 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6601 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6602 	seg = 0;
   6603 	fields = 0;
   6604 
   6605 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6606 		int hlen = offset + iphl;
   6607 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6608 
   6609 		if (__predict_false(m0->m_len <
   6610 				    (hlen + sizeof(struct tcphdr)))) {
   6611 			/*
   6612 			 * TCP/IP headers are not in the first mbuf; we need
   6613 			 * to do this the slow and painful way.  Let's just
   6614 			 * hope this doesn't happen very often.
   6615 			 */
   6616 			struct tcphdr th;
   6617 
   6618 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6619 
   6620 			m_copydata(m0, hlen, sizeof(th), &th);
   6621 			if (v4) {
   6622 				struct ip ip;
   6623 
   6624 				m_copydata(m0, offset, sizeof(ip), &ip);
   6625 				ip.ip_len = 0;
   6626 				m_copyback(m0,
   6627 				    offset + offsetof(struct ip, ip_len),
   6628 				    sizeof(ip.ip_len), &ip.ip_len);
   6629 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6630 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6631 			} else {
   6632 				struct ip6_hdr ip6;
   6633 
   6634 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6635 				ip6.ip6_plen = 0;
   6636 				m_copyback(m0,
   6637 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6638 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6639 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6640 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6641 			}
   6642 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6643 			    sizeof(th.th_sum), &th.th_sum);
   6644 
   6645 			hlen += th.th_off << 2;
   6646 		} else {
   6647 			/*
   6648 			 * TCP/IP headers are in the first mbuf; we can do
   6649 			 * this the easy way.
   6650 			 */
   6651 			struct tcphdr *th;
   6652 
   6653 			if (v4) {
   6654 				struct ip *ip =
   6655 				    (void *)(mtod(m0, char *) + offset);
   6656 				th = (void *)(mtod(m0, char *) + hlen);
   6657 
   6658 				ip->ip_len = 0;
   6659 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6660 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6661 			} else {
   6662 				struct ip6_hdr *ip6 =
   6663 				    (void *)(mtod(m0, char *) + offset);
   6664 				th = (void *)(mtod(m0, char *) + hlen);
   6665 
   6666 				ip6->ip6_plen = 0;
   6667 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6668 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6669 			}
   6670 			hlen += th->th_off << 2;
   6671 		}
   6672 
   6673 		if (v4) {
   6674 			WM_Q_EVCNT_INCR(txq, txtso);
   6675 			cmdlen |= WTX_TCPIP_CMD_IP;
   6676 		} else {
   6677 			WM_Q_EVCNT_INCR(txq, txtso6);
   6678 			ipcse = 0;
   6679 		}
   6680 		cmd |= WTX_TCPIP_CMD_TSE;
   6681 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6682 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6683 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6684 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6685 	}
   6686 
   6687 	/*
   6688 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6689 	 * offload feature, if we load the context descriptor, we
   6690 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6691 	 */
   6692 
   6693 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6694 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6695 	    WTX_TCPIP_IPCSE(ipcse);
   6696 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6697 		WM_Q_EVCNT_INCR(txq, txipsum);
   6698 		fields |= WTX_IXSM;
   6699 	}
   6700 
   6701 	offset += iphl;
   6702 
   6703 	if (m0->m_pkthdr.csum_flags &
   6704 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6705 		WM_Q_EVCNT_INCR(txq, txtusum);
   6706 		fields |= WTX_TXSM;
   6707 		tucs = WTX_TCPIP_TUCSS(offset) |
   6708 		    WTX_TCPIP_TUCSO(offset +
   6709 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6710 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6711 	} else if ((m0->m_pkthdr.csum_flags &
   6712 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6713 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6714 		fields |= WTX_TXSM;
   6715 		tucs = WTX_TCPIP_TUCSS(offset) |
   6716 		    WTX_TCPIP_TUCSO(offset +
   6717 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6718 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6719 	} else {
   6720 		/* Just initialize it to a valid TCP context. */
   6721 		tucs = WTX_TCPIP_TUCSS(offset) |
   6722 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6723 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6724 	}
   6725 
   6726 	/*
    6727 	 * We don't have to write a context descriptor for every packet,
    6728 	 * except on the 82574: when two descriptor queues are used, a
    6729 	 * context descriptor must be written for every packet there.
    6730 	 * Writing a context descriptor for every packet adds overhead,
    6731 	 * but it does not cause problems.
   6732 	 */
   6733 	/* Fill in the context descriptor. */
   6734 	t = (struct livengood_tcpip_ctxdesc *)
   6735 	    &txq->txq_descs[txq->txq_next];
   6736 	t->tcpip_ipcs = htole32(ipcs);
   6737 	t->tcpip_tucs = htole32(tucs);
   6738 	t->tcpip_cmdlen = htole32(cmdlen);
   6739 	t->tcpip_seg = htole32(seg);
   6740 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6741 
   6742 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6743 	txs->txs_ndesc++;
   6744 
   6745 	*cmdp = cmd;
   6746 	*fieldsp = fields;
   6747 
   6748 	return 0;
   6749 }
   6750 
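/*
 * A worked example of the fields built above for plain TCPv4 checksum
 * offload (no TSO), assuming standard 20-byte IP and TCP headers:
 *
 *	offset = 14 (ETHER_HDR_LEN)
 *	ipcs   = IPCSS(14) | IPCSO(14 + 10) | IPCSE(33)
 *	tucs   = TUCSS(34) | TUCSO(34 + 16) | TUCSE(0)
 *
 * where 10 == offsetof(struct ip, ip_sum), 16 == offsetof(struct
 * tcphdr, th_sum), and TUCSE(0) means "checksum to the end of the
 * packet".
 */
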
   6751 static inline int
   6752 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6753 {
   6754 	struct wm_softc *sc = ifp->if_softc;
   6755 	u_int cpuid = cpu_index(curcpu());
   6756 
   6757 	/*
    6758 	 * Currently, a simple distribution strategy.
    6759 	 * TODO:
    6760 	 * Distribute by flowid (RSS hash value).
    6761 	 */
    6762 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6763 }
   6764 
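/*
 * For example, with ncpu == 8, sc_affinity_offset == 2 and
 * sc_nqueues == 4, a packet transmitted from CPU 5 maps to queue
 * (5 + 8 - 2) % 4 == 3; the "+ ncpu" term keeps the dividend
 * non-negative when cpuid < sc_affinity_offset.
 */
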
   6765 /*
   6766  * wm_start:		[ifnet interface function]
   6767  *
   6768  *	Start packet transmission on the interface.
   6769  */
   6770 static void
   6771 wm_start(struct ifnet *ifp)
   6772 {
   6773 	struct wm_softc *sc = ifp->if_softc;
   6774 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6775 
   6776 #ifdef WM_MPSAFE
   6777 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6778 #endif
   6779 	/*
   6780 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6781 	 */
   6782 
   6783 	mutex_enter(txq->txq_lock);
   6784 	if (!txq->txq_stopping)
   6785 		wm_start_locked(ifp);
   6786 	mutex_exit(txq->txq_lock);
   6787 }
   6788 
   6789 static void
   6790 wm_start_locked(struct ifnet *ifp)
   6791 {
   6792 	struct wm_softc *sc = ifp->if_softc;
   6793 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6794 
   6795 	wm_send_common_locked(ifp, txq, false);
   6796 }
   6797 
   6798 static int
   6799 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6800 {
   6801 	int qid;
   6802 	struct wm_softc *sc = ifp->if_softc;
   6803 	struct wm_txqueue *txq;
   6804 
   6805 	qid = wm_select_txqueue(ifp, m);
   6806 	txq = &sc->sc_queue[qid].wmq_txq;
   6807 
   6808 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6809 		m_freem(m);
   6810 		WM_Q_EVCNT_INCR(txq, txdrop);
   6811 		return ENOBUFS;
   6812 	}
   6813 
   6814 	/*
   6815 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6816 	 */
   6817 	ifp->if_obytes += m->m_pkthdr.len;
   6818 	if (m->m_flags & M_MCAST)
   6819 		ifp->if_omcasts++;
   6820 
   6821 	if (mutex_tryenter(txq->txq_lock)) {
   6822 		if (!txq->txq_stopping)
   6823 			wm_transmit_locked(ifp, txq);
   6824 		mutex_exit(txq->txq_lock);
   6825 	}
   6826 
   6827 	return 0;
   6828 }
   6829 
   6830 static void
   6831 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6832 {
   6833 
   6834 	wm_send_common_locked(ifp, txq, true);
   6835 }
   6836 
   6837 static void
   6838 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6839     bool is_transmit)
   6840 {
   6841 	struct wm_softc *sc = ifp->if_softc;
   6842 	struct mbuf *m0;
   6843 	struct m_tag *mtag;
   6844 	struct wm_txsoft *txs;
   6845 	bus_dmamap_t dmamap;
   6846 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6847 	bus_addr_t curaddr;
   6848 	bus_size_t seglen, curlen;
   6849 	uint32_t cksumcmd;
   6850 	uint8_t cksumfields;
   6851 
   6852 	KASSERT(mutex_owned(txq->txq_lock));
   6853 
   6854 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6855 		return;
   6856 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6857 		return;
   6858 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6859 		return;
   6860 
   6861 	/* Remember the previous number of free descriptors. */
   6862 	ofree = txq->txq_free;
   6863 
   6864 	/*
   6865 	 * Loop through the send queue, setting up transmit descriptors
   6866 	 * until we drain the queue, or use up all available transmit
   6867 	 * descriptors.
   6868 	 */
   6869 	for (;;) {
   6870 		m0 = NULL;
   6871 
   6872 		/* Get a work queue entry. */
   6873 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6874 			wm_txeof(sc, txq);
   6875 			if (txq->txq_sfree == 0) {
   6876 				DPRINTF(WM_DEBUG_TX,
   6877 				    ("%s: TX: no free job descriptors\n",
   6878 					device_xname(sc->sc_dev)));
   6879 				WM_Q_EVCNT_INCR(txq, txsstall);
   6880 				break;
   6881 			}
   6882 		}
   6883 
   6884 		/* Grab a packet off the queue. */
   6885 		if (is_transmit)
   6886 			m0 = pcq_get(txq->txq_interq);
   6887 		else
   6888 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6889 		if (m0 == NULL)
   6890 			break;
   6891 
   6892 		DPRINTF(WM_DEBUG_TX,
   6893 		    ("%s: TX: have packet to transmit: %p\n",
   6894 		    device_xname(sc->sc_dev), m0));
   6895 
   6896 		txs = &txq->txq_soft[txq->txq_snext];
   6897 		dmamap = txs->txs_dmamap;
   6898 
   6899 		use_tso = (m0->m_pkthdr.csum_flags &
   6900 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6901 
   6902 		/*
   6903 		 * So says the Linux driver:
   6904 		 * The controller does a simple calculation to make sure
   6905 		 * there is enough room in the FIFO before initiating the
   6906 		 * DMA for each buffer.  The calc is:
   6907 		 *	4 = ceil(buffer len / MSS)
   6908 		 * To make sure we don't overrun the FIFO, adjust the max
   6909 		 * buffer len if the MSS drops.
   6910 		 */
   6911 		dmamap->dm_maxsegsz =
   6912 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6913 		    ? m0->m_pkthdr.segsz << 2
   6914 		    : WTX_MAX_LEN;
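		/*
		 * E.g. with an MSS of 1460 each DMA segment is capped
		 * at 1460 << 2 == 5840 bytes, satisfying the
		 * 4-buffers-per-MSS rule above; non-TSO packets keep
		 * the full WTX_MAX_LEN limit.
		 */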
   6915 
   6916 		/*
   6917 		 * Load the DMA map.  If this fails, the packet either
   6918 		 * didn't fit in the allotted number of segments, or we
   6919 		 * were short on resources.  For the too-many-segments
   6920 		 * case, we simply report an error and drop the packet,
   6921 		 * since we can't sanely copy a jumbo packet to a single
   6922 		 * buffer.
   6923 		 */
   6924 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6925 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6926 		if (error) {
   6927 			if (error == EFBIG) {
   6928 				WM_Q_EVCNT_INCR(txq, txdrop);
   6929 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6930 				    "DMA segments, dropping...\n",
   6931 				    device_xname(sc->sc_dev));
   6932 				wm_dump_mbuf_chain(sc, m0);
   6933 				m_freem(m0);
   6934 				continue;
   6935 			}
    6936 			/* Short on resources, just stop for now. */
   6937 			DPRINTF(WM_DEBUG_TX,
   6938 			    ("%s: TX: dmamap load failed: %d\n",
   6939 			    device_xname(sc->sc_dev), error));
   6940 			break;
   6941 		}
   6942 
   6943 		segs_needed = dmamap->dm_nsegs;
   6944 		if (use_tso) {
   6945 			/* For sentinel descriptor; see below. */
   6946 			segs_needed++;
   6947 		}
   6948 
   6949 		/*
   6950 		 * Ensure we have enough descriptors free to describe
   6951 		 * the packet.  Note, we always reserve one descriptor
   6952 		 * at the end of the ring due to the semantics of the
   6953 		 * TDT register, plus one more in the event we need
   6954 		 * to load offload context.
   6955 		 */
   6956 		if (segs_needed > txq->txq_free - 2) {
   6957 			/*
   6958 			 * Not enough free descriptors to transmit this
   6959 			 * packet.  We haven't committed anything yet,
   6960 			 * so just unload the DMA map, put the packet
    6961 			 * back on the queue, and punt.  Notify the upper
   6962 			 * layer that there are no more slots left.
   6963 			 */
   6964 			DPRINTF(WM_DEBUG_TX,
   6965 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6966 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6967 			    segs_needed, txq->txq_free - 1));
   6968 			if (!is_transmit)
   6969 				ifp->if_flags |= IFF_OACTIVE;
   6970 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6971 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6972 			WM_Q_EVCNT_INCR(txq, txdstall);
   6973 			break;
   6974 		}
   6975 
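		/*
		 * For example, with txq_free == 10 a packet needing
		 * segs_needed == 9 descriptors stalls in the check
		 * above, since 9 > 10 - 2: one slot stays reserved for
		 * the TDT ring semantics and one for a possible
		 * checksum context descriptor.
		 */
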
   6976 		/*
   6977 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6978 		 * once we know we can transmit the packet, since we
   6979 		 * do some internal FIFO space accounting here.
   6980 		 */
   6981 		if (sc->sc_type == WM_T_82547 &&
   6982 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6983 			DPRINTF(WM_DEBUG_TX,
   6984 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6985 			    device_xname(sc->sc_dev)));
   6986 			if (!is_transmit)
   6987 				ifp->if_flags |= IFF_OACTIVE;
   6988 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6989 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6990 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6991 			break;
   6992 		}
   6993 
   6994 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6995 
   6996 		DPRINTF(WM_DEBUG_TX,
   6997 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6998 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6999 
   7000 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7001 
   7002 		/*
   7003 		 * Store a pointer to the packet so that we can free it
   7004 		 * later.
   7005 		 *
    7006 		 * Initially, we take the number of descriptors the
    7007 		 * packet uses to be the number of DMA segments.  This may be
   7008 		 * incremented by 1 if we do checksum offload (a descriptor
   7009 		 * is used to set the checksum context).
   7010 		 */
   7011 		txs->txs_mbuf = m0;
   7012 		txs->txs_firstdesc = txq->txq_next;
   7013 		txs->txs_ndesc = segs_needed;
   7014 
   7015 		/* Set up offload parameters for this packet. */
   7016 		if (m0->m_pkthdr.csum_flags &
   7017 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7018 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7019 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7020 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7021 					  &cksumfields) != 0) {
   7022 				/* Error message already displayed. */
   7023 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7024 				continue;
   7025 			}
   7026 		} else {
   7027 			cksumcmd = 0;
   7028 			cksumfields = 0;
   7029 		}
   7030 
   7031 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7032 
   7033 		/* Sync the DMA map. */
   7034 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7035 		    BUS_DMASYNC_PREWRITE);
   7036 
   7037 		/* Initialize the transmit descriptor. */
   7038 		for (nexttx = txq->txq_next, seg = 0;
   7039 		     seg < dmamap->dm_nsegs; seg++) {
   7040 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7041 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7042 			     seglen != 0;
   7043 			     curaddr += curlen, seglen -= curlen,
   7044 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7045 				curlen = seglen;
   7046 
   7047 				/*
   7048 				 * So says the Linux driver:
   7049 				 * Work around for premature descriptor
   7050 				 * write-backs in TSO mode.  Append a
   7051 				 * 4-byte sentinel descriptor.
   7052 				 */
   7053 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7054 				    curlen > 8)
   7055 					curlen -= 4;
   7056 
   7057 				wm_set_dma_addr(
   7058 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7059 				txq->txq_descs[nexttx].wtx_cmdlen
   7060 				    = htole32(cksumcmd | curlen);
   7061 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7062 				    = 0;
   7063 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7064 				    = cksumfields;
    7065 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7066 				lasttx = nexttx;
   7067 
   7068 				DPRINTF(WM_DEBUG_TX,
   7069 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7070 				     "len %#04zx\n",
   7071 				    device_xname(sc->sc_dev), nexttx,
   7072 				    (uint64_t)curaddr, curlen));
   7073 			}
   7074 		}
   7075 
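		/*
		 * Sentinel example: if the final TSO segment is 100
		 * bytes, the loop above emits a 96-byte descriptor
		 * followed by a 4-byte sentinel descriptor; that extra
		 * descriptor is why segs_needed was incremented for TSO.
		 */
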
   7076 		KASSERT(lasttx != -1);
   7077 
   7078 		/*
   7079 		 * Set up the command byte on the last descriptor of
   7080 		 * the packet.  If we're in the interrupt delay window,
   7081 		 * delay the interrupt.
   7082 		 */
   7083 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7084 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7085 
   7086 		/*
   7087 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7088 		 * up the descriptor to encapsulate the packet for us.
   7089 		 *
   7090 		 * This is only valid on the last descriptor of the packet.
   7091 		 */
   7092 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7093 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7094 			    htole32(WTX_CMD_VLE);
   7095 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7096 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7097 		}
   7098 
   7099 		txs->txs_lastdesc = lasttx;
   7100 
   7101 		DPRINTF(WM_DEBUG_TX,
   7102 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7103 		    device_xname(sc->sc_dev),
   7104 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7105 
   7106 		/* Sync the descriptors we're using. */
   7107 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7108 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7109 
   7110 		/* Give the packet to the chip. */
   7111 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7112 
   7113 		DPRINTF(WM_DEBUG_TX,
   7114 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7115 
   7116 		DPRINTF(WM_DEBUG_TX,
   7117 		    ("%s: TX: finished transmitting packet, job %d\n",
   7118 		    device_xname(sc->sc_dev), txq->txq_snext));
   7119 
   7120 		/* Advance the tx pointer. */
   7121 		txq->txq_free -= txs->txs_ndesc;
   7122 		txq->txq_next = nexttx;
   7123 
   7124 		txq->txq_sfree--;
   7125 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7126 
   7127 		/* Pass the packet to any BPF listeners. */
   7128 		bpf_mtap(ifp, m0);
   7129 	}
   7130 
   7131 	if (m0 != NULL) {
   7132 		if (!is_transmit)
   7133 			ifp->if_flags |= IFF_OACTIVE;
   7134 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7135 		WM_Q_EVCNT_INCR(txq, txdrop);
   7136 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7137 			__func__));
   7138 		m_freem(m0);
   7139 	}
   7140 
   7141 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7142 		/* No more slots; notify upper layer. */
   7143 		if (!is_transmit)
   7144 			ifp->if_flags |= IFF_OACTIVE;
   7145 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7146 	}
   7147 
   7148 	if (txq->txq_free != ofree) {
   7149 		/* Set a watchdog timer in case the chip flakes out. */
   7150 		ifp->if_timer = 5;
   7151 	}
   7152 }
   7153 
   7154 /*
   7155  * wm_nq_tx_offload:
   7156  *
   7157  *	Set up TCP/IP checksumming parameters for the
   7158  *	specified packet, for NEWQUEUE devices
   7159  */
   7160 static int
   7161 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7162     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7163 {
   7164 	struct mbuf *m0 = txs->txs_mbuf;
   7165 	struct m_tag *mtag;
   7166 	uint32_t vl_len, mssidx, cmdc;
   7167 	struct ether_header *eh;
   7168 	int offset, iphl;
   7169 
   7170 	/*
   7171 	 * XXX It would be nice if the mbuf pkthdr had offset
   7172 	 * fields for the protocol headers.
   7173 	 */
   7174 	*cmdlenp = 0;
   7175 	*fieldsp = 0;
   7176 
   7177 	eh = mtod(m0, struct ether_header *);
   7178 	switch (htons(eh->ether_type)) {
   7179 	case ETHERTYPE_IP:
   7180 	case ETHERTYPE_IPV6:
   7181 		offset = ETHER_HDR_LEN;
   7182 		break;
   7183 
   7184 	case ETHERTYPE_VLAN:
   7185 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7186 		break;
   7187 
   7188 	default:
   7189 		/* Don't support this protocol or encapsulation. */
   7190 		*do_csum = false;
   7191 		return 0;
   7192 	}
   7193 	*do_csum = true;
   7194 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7195 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7196 
   7197 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7198 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7199 
   7200 	if ((m0->m_pkthdr.csum_flags &
   7201 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7202 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7203 	} else {
   7204 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7205 	}
   7206 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7207 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7208 
   7209 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7210 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7211 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7212 		*cmdlenp |= NQTX_CMD_VLE;
   7213 	}
   7214 
   7215 	mssidx = 0;
   7216 
   7217 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7218 		int hlen = offset + iphl;
   7219 		int tcp_hlen;
   7220 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7221 
   7222 		if (__predict_false(m0->m_len <
   7223 				    (hlen + sizeof(struct tcphdr)))) {
   7224 			/*
   7225 			 * TCP/IP headers are not in the first mbuf; we need
   7226 			 * to do this the slow and painful way.  Let's just
   7227 			 * hope this doesn't happen very often.
   7228 			 */
   7229 			struct tcphdr th;
   7230 
   7231 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7232 
   7233 			m_copydata(m0, hlen, sizeof(th), &th);
   7234 			if (v4) {
   7235 				struct ip ip;
   7236 
   7237 				m_copydata(m0, offset, sizeof(ip), &ip);
   7238 				ip.ip_len = 0;
   7239 				m_copyback(m0,
   7240 				    offset + offsetof(struct ip, ip_len),
   7241 				    sizeof(ip.ip_len), &ip.ip_len);
   7242 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7243 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7244 			} else {
   7245 				struct ip6_hdr ip6;
   7246 
   7247 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7248 				ip6.ip6_plen = 0;
   7249 				m_copyback(m0,
   7250 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7251 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7252 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7253 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7254 			}
   7255 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7256 			    sizeof(th.th_sum), &th.th_sum);
   7257 
   7258 			tcp_hlen = th.th_off << 2;
   7259 		} else {
   7260 			/*
   7261 			 * TCP/IP headers are in the first mbuf; we can do
   7262 			 * this the easy way.
   7263 			 */
   7264 			struct tcphdr *th;
   7265 
   7266 			if (v4) {
   7267 				struct ip *ip =
   7268 				    (void *)(mtod(m0, char *) + offset);
   7269 				th = (void *)(mtod(m0, char *) + hlen);
   7270 
   7271 				ip->ip_len = 0;
   7272 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7273 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7274 			} else {
   7275 				struct ip6_hdr *ip6 =
   7276 				    (void *)(mtod(m0, char *) + offset);
   7277 				th = (void *)(mtod(m0, char *) + hlen);
   7278 
   7279 				ip6->ip6_plen = 0;
   7280 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7281 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7282 			}
   7283 			tcp_hlen = th->th_off << 2;
   7284 		}
   7285 		hlen += tcp_hlen;
   7286 		*cmdlenp |= NQTX_CMD_TSE;
   7287 
   7288 		if (v4) {
   7289 			WM_Q_EVCNT_INCR(txq, txtso);
   7290 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7291 		} else {
   7292 			WM_Q_EVCNT_INCR(txq, txtso6);
   7293 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7294 		}
   7295 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7296 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7297 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7298 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7299 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7300 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7301 	} else {
   7302 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7303 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7304 	}
   7305 
   7306 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7307 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7308 		cmdc |= NQTXC_CMD_IP4;
   7309 	}
   7310 
   7311 	if (m0->m_pkthdr.csum_flags &
   7312 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7313 		WM_Q_EVCNT_INCR(txq, txtusum);
   7314 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7315 			cmdc |= NQTXC_CMD_TCP;
   7316 		} else {
   7317 			cmdc |= NQTXC_CMD_UDP;
   7318 		}
   7319 		cmdc |= NQTXC_CMD_IP4;
   7320 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7321 	}
   7322 	if (m0->m_pkthdr.csum_flags &
   7323 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7324 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7325 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7326 			cmdc |= NQTXC_CMD_TCP;
   7327 		} else {
   7328 			cmdc |= NQTXC_CMD_UDP;
   7329 		}
   7330 		cmdc |= NQTXC_CMD_IP6;
   7331 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7332 	}
   7333 
   7334 	/*
    7335 	 * We don't have to write a context descriptor for every packet on
    7336 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7337 	 * I354, I210 and I211; it is enough to write one per Tx queue for
    7338 	 * these controllers.
    7339 	 * Writing a context descriptor for every packet adds overhead,
    7340 	 * but it does not cause problems.
   7341 	 */
   7342 	/* Fill in the context descriptor. */
   7343 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7344 	    htole32(vl_len);
   7345 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7346 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7347 	    htole32(cmdc);
   7348 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7349 	    htole32(mssidx);
   7350 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7351 	DPRINTF(WM_DEBUG_TX,
   7352 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7353 	    txq->txq_next, 0, vl_len));
   7354 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7355 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7356 	txs->txs_ndesc++;
   7357 	return 0;
   7358 }
   7359 
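/*
 * A worked example of the context built above for TCPv4 TSO, assuming
 * standard 20-byte IP and TCP headers and an MSS of 1448:
 *
 *	vl_len = MACLEN(14) | IPLEN(20)	(plus the VLAN tag, if any)
 *	cmdc   = NQTX_DTYP_C | NQTX_CMD_DEXT | NQTXC_CMD_TCP | NQTXC_CMD_IP4
 *	mssidx = MSS(1448) | L4LEN(20)
 *
 * and the caller's cmdlen gains NQTX_CMD_TSE while fields carries the
 * TSO payload length (pkthdr.len minus the headers).
 */
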
   7360 /*
   7361  * wm_nq_start:		[ifnet interface function]
   7362  *
   7363  *	Start packet transmission on the interface for NEWQUEUE devices
   7364  */
   7365 static void
   7366 wm_nq_start(struct ifnet *ifp)
   7367 {
   7368 	struct wm_softc *sc = ifp->if_softc;
   7369 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7370 
   7371 #ifdef WM_MPSAFE
   7372 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7373 #endif
   7374 	/*
   7375 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7376 	 */
   7377 
   7378 	mutex_enter(txq->txq_lock);
   7379 	if (!txq->txq_stopping)
   7380 		wm_nq_start_locked(ifp);
   7381 	mutex_exit(txq->txq_lock);
   7382 }
   7383 
   7384 static void
   7385 wm_nq_start_locked(struct ifnet *ifp)
   7386 {
   7387 	struct wm_softc *sc = ifp->if_softc;
   7388 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7389 
   7390 	wm_nq_send_common_locked(ifp, txq, false);
   7391 }
   7392 
   7393 static int
   7394 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7395 {
   7396 	int qid;
   7397 	struct wm_softc *sc = ifp->if_softc;
   7398 	struct wm_txqueue *txq;
   7399 
   7400 	qid = wm_select_txqueue(ifp, m);
   7401 	txq = &sc->sc_queue[qid].wmq_txq;
   7402 
   7403 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7404 		m_freem(m);
   7405 		WM_Q_EVCNT_INCR(txq, txdrop);
   7406 		return ENOBUFS;
   7407 	}
   7408 
   7409 	/*
   7410 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7411 	 */
   7412 	ifp->if_obytes += m->m_pkthdr.len;
   7413 	if (m->m_flags & M_MCAST)
   7414 		ifp->if_omcasts++;
   7415 
   7416 	/*
    7417 	 * There are two situations in which this mutex_tryenter() can
    7418 	 * fail at run time:
    7419 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7420 	 *     (2) contention with the deferred if_start softint
    7421 	 *         (wm_handle_queue())
    7422 	 * In both cases, the last packet enqueued to txq->txq_interq is
    7423 	 * dequeued by wm_deferred_start_locked(), so it does not get
    7424 	 * stuck.
   7425 	 */
   7426 	if (mutex_tryenter(txq->txq_lock)) {
   7427 		if (!txq->txq_stopping)
   7428 			wm_nq_transmit_locked(ifp, txq);
   7429 		mutex_exit(txq->txq_lock);
   7430 	}
   7431 
   7432 	return 0;
   7433 }
   7434 
   7435 static void
   7436 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7437 {
   7438 
   7439 	wm_nq_send_common_locked(ifp, txq, true);
   7440 }
   7441 
   7442 static void
   7443 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7444     bool is_transmit)
   7445 {
   7446 	struct wm_softc *sc = ifp->if_softc;
   7447 	struct mbuf *m0;
   7448 	struct m_tag *mtag;
   7449 	struct wm_txsoft *txs;
   7450 	bus_dmamap_t dmamap;
   7451 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7452 	bool do_csum, sent;
   7453 
   7454 	KASSERT(mutex_owned(txq->txq_lock));
   7455 
   7456 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7457 		return;
   7458 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7459 		return;
   7460 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7461 		return;
   7462 
   7463 	sent = false;
   7464 
   7465 	/*
   7466 	 * Loop through the send queue, setting up transmit descriptors
   7467 	 * until we drain the queue, or use up all available transmit
   7468 	 * descriptors.
   7469 	 */
   7470 	for (;;) {
   7471 		m0 = NULL;
   7472 
   7473 		/* Get a work queue entry. */
   7474 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7475 			wm_txeof(sc, txq);
   7476 			if (txq->txq_sfree == 0) {
   7477 				DPRINTF(WM_DEBUG_TX,
   7478 				    ("%s: TX: no free job descriptors\n",
   7479 					device_xname(sc->sc_dev)));
   7480 				WM_Q_EVCNT_INCR(txq, txsstall);
   7481 				break;
   7482 			}
   7483 		}
   7484 
   7485 		/* Grab a packet off the queue. */
   7486 		if (is_transmit)
   7487 			m0 = pcq_get(txq->txq_interq);
   7488 		else
   7489 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7490 		if (m0 == NULL)
   7491 			break;
   7492 
   7493 		DPRINTF(WM_DEBUG_TX,
   7494 		    ("%s: TX: have packet to transmit: %p\n",
   7495 		    device_xname(sc->sc_dev), m0));
   7496 
   7497 		txs = &txq->txq_soft[txq->txq_snext];
   7498 		dmamap = txs->txs_dmamap;
   7499 
   7500 		/*
   7501 		 * Load the DMA map.  If this fails, the packet either
   7502 		 * didn't fit in the allotted number of segments, or we
   7503 		 * were short on resources.  For the too-many-segments
   7504 		 * case, we simply report an error and drop the packet,
   7505 		 * since we can't sanely copy a jumbo packet to a single
   7506 		 * buffer.
   7507 		 */
   7508 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7509 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7510 		if (error) {
   7511 			if (error == EFBIG) {
   7512 				WM_Q_EVCNT_INCR(txq, txdrop);
   7513 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7514 				    "DMA segments, dropping...\n",
   7515 				    device_xname(sc->sc_dev));
   7516 				wm_dump_mbuf_chain(sc, m0);
   7517 				m_freem(m0);
   7518 				continue;
   7519 			}
   7520 			/* Short on resources, just stop for now. */
   7521 			DPRINTF(WM_DEBUG_TX,
   7522 			    ("%s: TX: dmamap load failed: %d\n",
   7523 			    device_xname(sc->sc_dev), error));
   7524 			break;
   7525 		}
   7526 
   7527 		segs_needed = dmamap->dm_nsegs;
   7528 
   7529 		/*
   7530 		 * Ensure we have enough descriptors free to describe
   7531 		 * the packet.  Note, we always reserve one descriptor
   7532 		 * at the end of the ring due to the semantics of the
   7533 		 * TDT register, plus one more in the event we need
   7534 		 * to load offload context.
   7535 		 */
   7536 		if (segs_needed > txq->txq_free - 2) {
   7537 			/*
   7538 			 * Not enough free descriptors to transmit this
   7539 			 * packet.  We haven't committed anything yet,
   7540 			 * so just unload the DMA map, put the packet
    7541 			 * back on the queue, and punt.  Notify the upper
   7542 			 * layer that there are no more slots left.
   7543 			 */
   7544 			DPRINTF(WM_DEBUG_TX,
   7545 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7546 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7547 			    segs_needed, txq->txq_free - 1));
   7548 			if (!is_transmit)
   7549 				ifp->if_flags |= IFF_OACTIVE;
   7550 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7551 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7552 			WM_Q_EVCNT_INCR(txq, txdstall);
   7553 			break;
   7554 		}
   7555 
   7556 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7557 
   7558 		DPRINTF(WM_DEBUG_TX,
   7559 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7560 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7561 
   7562 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7563 
   7564 		/*
   7565 		 * Store a pointer to the packet so that we can free it
   7566 		 * later.
   7567 		 *
    7568 		 * Initially, we take the number of descriptors the
    7569 		 * packet uses to be the number of DMA segments.  This may be
   7570 		 * incremented by 1 if we do checksum offload (a descriptor
   7571 		 * is used to set the checksum context).
   7572 		 */
   7573 		txs->txs_mbuf = m0;
   7574 		txs->txs_firstdesc = txq->txq_next;
   7575 		txs->txs_ndesc = segs_needed;
   7576 
   7577 		/* Set up offload parameters for this packet. */
   7578 		uint32_t cmdlen, fields, dcmdlen;
   7579 		if (m0->m_pkthdr.csum_flags &
   7580 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7581 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7582 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7583 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7584 			    &do_csum) != 0) {
   7585 				/* Error message already displayed. */
   7586 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7587 				continue;
   7588 			}
   7589 		} else {
   7590 			do_csum = false;
   7591 			cmdlen = 0;
   7592 			fields = 0;
   7593 		}
   7594 
   7595 		/* Sync the DMA map. */
   7596 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7597 		    BUS_DMASYNC_PREWRITE);
   7598 
   7599 		/* Initialize the first transmit descriptor. */
   7600 		nexttx = txq->txq_next;
   7601 		if (!do_csum) {
   7602 			/* setup a legacy descriptor */
   7603 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7604 			    dmamap->dm_segs[0].ds_addr);
   7605 			txq->txq_descs[nexttx].wtx_cmdlen =
   7606 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7607 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7608 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7609 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7610 			    NULL) {
   7611 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7612 				    htole32(WTX_CMD_VLE);
   7613 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7614 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7615 			} else {
    7616 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7617 			}
   7618 			dcmdlen = 0;
   7619 		} else {
   7620 			/* setup an advanced data descriptor */
   7621 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7622 			    htole64(dmamap->dm_segs[0].ds_addr);
   7623 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7624 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7625 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7626 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7627 			    htole32(fields);
   7628 			DPRINTF(WM_DEBUG_TX,
   7629 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7630 			    device_xname(sc->sc_dev), nexttx,
   7631 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7632 			DPRINTF(WM_DEBUG_TX,
   7633 			    ("\t 0x%08x%08x\n", fields,
   7634 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7635 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7636 		}
   7637 
   7638 		lasttx = nexttx;
   7639 		nexttx = WM_NEXTTX(txq, nexttx);
   7640 		/*
    7641 		 * Fill in the remaining descriptors; the legacy and
    7642 		 * advanced formats are the same from here on.
   7643 		 */
   7644 		for (seg = 1; seg < dmamap->dm_nsegs;
   7645 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7646 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7647 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7648 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7649 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7650 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7651 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7652 			lasttx = nexttx;
   7653 
   7654 			DPRINTF(WM_DEBUG_TX,
   7655 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7656 			     "len %#04zx\n",
   7657 			    device_xname(sc->sc_dev), nexttx,
   7658 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7659 			    dmamap->dm_segs[seg].ds_len));
   7660 		}
   7661 
   7662 		KASSERT(lasttx != -1);
   7663 
   7664 		/*
   7665 		 * Set up the command byte on the last descriptor of
   7666 		 * the packet.  If we're in the interrupt delay window,
   7667 		 * delay the interrupt.
   7668 		 */
   7669 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7670 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7671 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7672 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7673 
   7674 		txs->txs_lastdesc = lasttx;
   7675 
   7676 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7677 		    device_xname(sc->sc_dev),
   7678 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7679 
   7680 		/* Sync the descriptors we're using. */
   7681 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7682 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7683 
   7684 		/* Give the packet to the chip. */
   7685 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7686 		sent = true;
   7687 
   7688 		DPRINTF(WM_DEBUG_TX,
   7689 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7690 
   7691 		DPRINTF(WM_DEBUG_TX,
   7692 		    ("%s: TX: finished transmitting packet, job %d\n",
   7693 		    device_xname(sc->sc_dev), txq->txq_snext));
   7694 
   7695 		/* Advance the tx pointer. */
   7696 		txq->txq_free -= txs->txs_ndesc;
   7697 		txq->txq_next = nexttx;
   7698 
   7699 		txq->txq_sfree--;
   7700 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7701 
   7702 		/* Pass the packet to any BPF listeners. */
   7703 		bpf_mtap(ifp, m0);
   7704 	}
   7705 
   7706 	if (m0 != NULL) {
   7707 		if (!is_transmit)
   7708 			ifp->if_flags |= IFF_OACTIVE;
   7709 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7710 		WM_Q_EVCNT_INCR(txq, txdrop);
   7711 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7712 			__func__));
   7713 		m_freem(m0);
   7714 	}
   7715 
   7716 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7717 		/* No more slots; notify upper layer. */
   7718 		if (!is_transmit)
   7719 			ifp->if_flags |= IFF_OACTIVE;
   7720 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7721 	}
   7722 
   7723 	if (sent) {
   7724 		/* Set a watchdog timer in case the chip flakes out. */
   7725 		ifp->if_timer = 5;
   7726 	}
   7727 }
   7728 
   7729 static void
   7730 wm_deferred_start_locked(struct wm_txqueue *txq)
   7731 {
   7732 	struct wm_softc *sc = txq->txq_sc;
   7733 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7734 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7735 	int qid = wmq->wmq_id;
   7736 
   7737 	KASSERT(mutex_owned(txq->txq_lock));
   7738 
   7739 	if (txq->txq_stopping) {
   7740 		mutex_exit(txq->txq_lock);
   7741 		return;
   7742 	}
   7743 
   7744 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7745 		/* XXX needed for ALTQ or single-CPU systems */
   7746 		if (qid == 0)
   7747 			wm_nq_start_locked(ifp);
   7748 		wm_nq_transmit_locked(ifp, txq);
   7749 	} else {
    7750 		/* XXX needed for ALTQ or single-CPU systems */
   7751 		if (qid == 0)
   7752 			wm_start_locked(ifp);
   7753 		wm_transmit_locked(ifp, txq);
   7754 	}
   7755 }
   7756 
   7757 /* Interrupt */
   7758 
   7759 /*
   7760  * wm_txeof:
   7761  *
   7762  *	Helper; handle transmit interrupts.
   7763  */
   7764 static int
   7765 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7766 {
   7767 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7768 	struct wm_txsoft *txs;
   7769 	bool processed = false;
   7770 	int count = 0;
   7771 	int i;
   7772 	uint8_t status;
   7773 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7774 
   7775 	KASSERT(mutex_owned(txq->txq_lock));
   7776 
   7777 	if (txq->txq_stopping)
   7778 		return 0;
   7779 
   7780 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7781 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7782 	if (wmq->wmq_id == 0)
   7783 		ifp->if_flags &= ~IFF_OACTIVE;
   7784 
   7785 	/*
   7786 	 * Go through the Tx list and free mbufs for those
   7787 	 * frames which have been transmitted.
   7788 	 */
   7789 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7790 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7791 		txs = &txq->txq_soft[i];
   7792 
   7793 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7794 			device_xname(sc->sc_dev), i));
   7795 
   7796 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7797 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7798 
   7799 		status =
   7800 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7801 		if ((status & WTX_ST_DD) == 0) {
   7802 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7803 			    BUS_DMASYNC_PREREAD);
   7804 			break;
   7805 		}
   7806 
   7807 		processed = true;
   7808 		count++;
   7809 		DPRINTF(WM_DEBUG_TX,
   7810 		    ("%s: TX: job %d done: descs %d..%d\n",
   7811 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7812 		    txs->txs_lastdesc));
   7813 
   7814 		/*
   7815 		 * XXX We should probably be using the statistics
   7816 		 * XXX registers, but I don't know if they exist
   7817 		 * XXX on chips before the i82544.
   7818 		 */
   7819 
   7820 #ifdef WM_EVENT_COUNTERS
   7821 		if (status & WTX_ST_TU)
   7822 			WM_Q_EVCNT_INCR(txq, tu);
   7823 #endif /* WM_EVENT_COUNTERS */
   7824 
   7825 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7826 			ifp->if_oerrors++;
   7827 			if (status & WTX_ST_LC)
   7828 				log(LOG_WARNING, "%s: late collision\n",
   7829 				    device_xname(sc->sc_dev));
   7830 			else if (status & WTX_ST_EC) {
   7831 				ifp->if_collisions += 16;
   7832 				log(LOG_WARNING, "%s: excessive collisions\n",
   7833 				    device_xname(sc->sc_dev));
   7834 			}
   7835 		} else
   7836 			ifp->if_opackets++;
   7837 
   7838 		txq->txq_packets++;
   7839 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7840 
   7841 		txq->txq_free += txs->txs_ndesc;
   7842 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7843 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7844 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7845 		m_freem(txs->txs_mbuf);
   7846 		txs->txs_mbuf = NULL;
   7847 	}
   7848 
   7849 	/* Update the dirty transmit buffer pointer. */
   7850 	txq->txq_sdirty = i;
   7851 	DPRINTF(WM_DEBUG_TX,
   7852 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7853 
   7854 	if (count != 0)
   7855 		rnd_add_uint32(&sc->rnd_source, count);
   7856 
   7857 	/*
   7858 	 * If there are no more pending transmissions, cancel the watchdog
   7859 	 * timer.
   7860 	 */
   7861 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7862 		ifp->if_timer = 0;
   7863 
   7864 	return processed;
   7865 }
   7866 
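/*
 * Example of the reclaim sweep above: with jobs 3..5 outstanding and
 * the hardware having set WTX_ST_DD on the last descriptor of jobs 3
 * and 4 only, the loop frees the mbufs and descriptors of jobs 3 and
 * 4 and leaves txq_sdirty == 5 for a later pass.
 */
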
   7867 static inline uint32_t
   7868 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7869 {
   7870 	struct wm_softc *sc = rxq->rxq_sc;
   7871 
   7872 	if (sc->sc_type == WM_T_82574)
   7873 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7874 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7875 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7876 	else
   7877 		return rxq->rxq_descs[idx].wrx_status;
   7878 }
   7879 
   7880 static inline uint32_t
   7881 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7882 {
   7883 	struct wm_softc *sc = rxq->rxq_sc;
   7884 
   7885 	if (sc->sc_type == WM_T_82574)
   7886 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7887 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7888 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7889 	else
   7890 		return rxq->rxq_descs[idx].wrx_errors;
   7891 }
   7892 
   7893 static inline uint16_t
   7894 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7895 {
   7896 	struct wm_softc *sc = rxq->rxq_sc;
   7897 
   7898 	if (sc->sc_type == WM_T_82574)
   7899 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7900 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7901 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7902 	else
   7903 		return rxq->rxq_descs[idx].wrx_special;
   7904 }
   7905 
   7906 static inline int
   7907 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7908 {
   7909 	struct wm_softc *sc = rxq->rxq_sc;
   7910 
   7911 	if (sc->sc_type == WM_T_82574)
   7912 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7913 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7914 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7915 	else
   7916 		return rxq->rxq_descs[idx].wrx_len;
   7917 }
   7918 
   7919 #ifdef WM_DEBUG
   7920 static inline uint32_t
   7921 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7922 {
   7923 	struct wm_softc *sc = rxq->rxq_sc;
   7924 
   7925 	if (sc->sc_type == WM_T_82574)
   7926 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7927 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7928 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7929 	else
   7930 		return 0;
   7931 }
   7932 
   7933 static inline uint8_t
   7934 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7935 {
   7936 	struct wm_softc *sc = rxq->rxq_sc;
   7937 
   7938 	if (sc->sc_type == WM_T_82574)
   7939 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7940 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7941 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7942 	else
   7943 		return 0;
   7944 }
   7945 #endif /* WM_DEBUG */
   7946 
   7947 static inline bool
   7948 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7949     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7950 {
   7951 
   7952 	if (sc->sc_type == WM_T_82574)
   7953 		return (status & ext_bit) != 0;
   7954 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7955 		return (status & nq_bit) != 0;
   7956 	else
   7957 		return (status & legacy_bit) != 0;
   7958 }
   7959 
   7960 static inline bool
   7961 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7962     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7963 {
   7964 
   7965 	if (sc->sc_type == WM_T_82574)
   7966 		return (error & ext_bit) != 0;
   7967 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7968 		return (error & nq_bit) != 0;
   7969 	else
   7970 		return (error & legacy_bit) != 0;
   7971 }
   7972 
   7973 static inline bool
   7974 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7975 {
   7976 
   7977 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7978 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7979 		return true;
   7980 	else
   7981 		return false;
   7982 }
   7983 
   7984 static inline bool
   7985 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7986 {
   7987 	struct wm_softc *sc = rxq->rxq_sc;
   7988 
   7989 	/* XXXX missing error bit for newqueue? */
   7990 	if (wm_rxdesc_is_set_error(sc, errors,
   7991 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7992 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7993 		NQRXC_ERROR_RXE)) {
   7994 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7995 			log(LOG_WARNING, "%s: symbol error\n",
   7996 			    device_xname(sc->sc_dev));
   7997 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7998 			log(LOG_WARNING, "%s: receive sequence error\n",
   7999 			    device_xname(sc->sc_dev));
   8000 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8001 			log(LOG_WARNING, "%s: CRC error\n",
   8002 			    device_xname(sc->sc_dev));
   8003 		return true;
   8004 	}
   8005 
   8006 	return false;
   8007 }
   8008 
   8009 static inline bool
   8010 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8011 {
   8012 	struct wm_softc *sc = rxq->rxq_sc;
   8013 
   8014 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8015 		NQRXC_STATUS_DD)) {
   8016 		/* We have processed all of the receive descriptors. */
   8017 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8018 		return false;
   8019 	}
   8020 
   8021 	return true;
   8022 }
   8023 
   8024 static inline bool
   8025 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8026     struct mbuf *m)
   8027 {
   8028 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8029 
   8030 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8031 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8032 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8033 	}
   8034 
   8035 	return true;
   8036 }
   8037 
   8038 static inline void
   8039 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8040     uint32_t errors, struct mbuf *m)
   8041 {
   8042 	struct wm_softc *sc = rxq->rxq_sc;
   8043 
   8044 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
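         		/*
         		 * On legacy descriptors, IXSM set means the chip
         		 * skipped checksum evaluation for this packet; the
         		 * extended and NEWQUEUE formats have no equivalent
         		 * bit (0 is passed), so the test never fires there.
         		 */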
   8045 		if (wm_rxdesc_is_set_status(sc, status,
   8046 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8047 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8048 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8049 			if (wm_rxdesc_is_set_error(sc, errors,
   8050 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8051 				m->m_pkthdr.csum_flags |=
   8052 					M_CSUM_IPv4_BAD;
   8053 		}
   8054 		if (wm_rxdesc_is_set_status(sc, status,
   8055 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8056 			/*
   8057 			 * Note: we don't know if this was TCP or UDP,
   8058 			 * so we just set both bits, and expect the
   8059 			 * upper layers to deal.
   8060 			 */
   8061 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8062 			m->m_pkthdr.csum_flags |=
   8063 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8064 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8065 			if (wm_rxdesc_is_set_error(sc, errors,
   8066 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8067 				m->m_pkthdr.csum_flags |=
   8068 					M_CSUM_TCP_UDP_BAD;
   8069 		}
   8070 	}
   8071 }
   8072 
   8073 /*
   8074  * wm_rxeof:
   8075  *
   8076  *	Helper; handle receive interrupts.
   8077  */
   8078 static void
   8079 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8080 {
   8081 	struct wm_softc *sc = rxq->rxq_sc;
   8082 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8083 	struct wm_rxsoft *rxs;
   8084 	struct mbuf *m;
   8085 	int i, len;
   8086 	int count = 0;
   8087 	uint32_t status, errors;
   8088 	uint16_t vlantag;
   8089 
   8090 	KASSERT(mutex_owned(rxq->rxq_lock));
   8091 
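         	/*
         	 * Process at most "limit" descriptors per call so a busy
         	 * ring cannot monopolize this context indefinitely.
         	 */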
   8092 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8093 		if (limit-- == 0) {
   8094 			rxq->rxq_ptr = i;
   8095 			break;
   8096 		}
   8097 
   8098 		rxs = &rxq->rxq_soft[i];
   8099 
   8100 		DPRINTF(WM_DEBUG_RX,
   8101 		    ("%s: RX: checking descriptor %d\n",
   8102 		    device_xname(sc->sc_dev), i));
    8103 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8104 
   8105 		status = wm_rxdesc_get_status(rxq, i);
   8106 		errors = wm_rxdesc_get_errors(rxq, i);
   8107 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8108 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8109 #ifdef WM_DEBUG
   8110 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8111 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8112 #endif
   8113 
   8114 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8115 			/*
    8116 			 * Update the receive pointer while holding
    8117 			 * rxq_lock, consistent with the counters.
   8118 			 */
   8119 			rxq->rxq_ptr = i;
   8120 			break;
   8121 		}
   8122 
   8123 		count++;
   8124 		if (__predict_false(rxq->rxq_discard)) {
   8125 			DPRINTF(WM_DEBUG_RX,
   8126 			    ("%s: RX: discarding contents of descriptor %d\n",
   8127 			    device_xname(sc->sc_dev), i));
   8128 			wm_init_rxdesc(rxq, i);
   8129 			if (wm_rxdesc_is_eop(rxq, status)) {
   8130 				/* Reset our state. */
   8131 				DPRINTF(WM_DEBUG_RX,
   8132 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8133 				    device_xname(sc->sc_dev)));
   8134 				rxq->rxq_discard = 0;
   8135 			}
   8136 			continue;
   8137 		}
   8138 
   8139 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8140 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8141 
   8142 		m = rxs->rxs_mbuf;
   8143 
   8144 		/*
   8145 		 * Add a new receive buffer to the ring, unless of
   8146 		 * course the length is zero. Treat the latter as a
   8147 		 * failed mapping.
   8148 		 */
   8149 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8150 			/*
   8151 			 * Failed, throw away what we've done so
   8152 			 * far, and discard the rest of the packet.
   8153 			 */
   8154 			ifp->if_ierrors++;
   8155 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8156 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8157 			wm_init_rxdesc(rxq, i);
   8158 			if (!wm_rxdesc_is_eop(rxq, status))
   8159 				rxq->rxq_discard = 1;
   8160 			if (rxq->rxq_head != NULL)
   8161 				m_freem(rxq->rxq_head);
   8162 			WM_RXCHAIN_RESET(rxq);
   8163 			DPRINTF(WM_DEBUG_RX,
   8164 			    ("%s: RX: Rx buffer allocation failed, "
   8165 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8166 			    rxq->rxq_discard ? " (discard)" : ""));
   8167 			continue;
   8168 		}
   8169 
   8170 		m->m_len = len;
   8171 		rxq->rxq_len += len;
   8172 		DPRINTF(WM_DEBUG_RX,
   8173 		    ("%s: RX: buffer at %p len %d\n",
   8174 		    device_xname(sc->sc_dev), m->m_data, len));
   8175 
   8176 		/* If this is not the end of the packet, keep looking. */
   8177 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8178 			WM_RXCHAIN_LINK(rxq, m);
   8179 			DPRINTF(WM_DEBUG_RX,
   8180 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8181 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8182 			continue;
   8183 		}
   8184 
   8185 		/*
    8186 		 * Okay, we have the entire packet now.  The chip is
    8187 		 * configured to include the FCS except on I350, I354
    8188 		 * and I21[01] (not all chips can be configured to strip
    8189 		 * it), so we need to trim it.  We may also need to
    8190 		 * adjust the length of the previous mbuf in the chain
    8191 		 * if the current mbuf is too short.
    8192 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8193 		 * register is always set on I350, so we don't trim there.
   8194 		 */
   8195 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8196 		    && (sc->sc_type != WM_T_I210)
   8197 		    && (sc->sc_type != WM_T_I211)) {
   8198 			if (m->m_len < ETHER_CRC_LEN) {
   8199 				rxq->rxq_tail->m_len
   8200 				    -= (ETHER_CRC_LEN - m->m_len);
   8201 				m->m_len = 0;
   8202 			} else
   8203 				m->m_len -= ETHER_CRC_LEN;
   8204 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8205 		} else
   8206 			len = rxq->rxq_len;
   8207 
   8208 		WM_RXCHAIN_LINK(rxq, m);
   8209 
   8210 		*rxq->rxq_tailp = NULL;
   8211 		m = rxq->rxq_head;
   8212 
   8213 		WM_RXCHAIN_RESET(rxq);
   8214 
   8215 		DPRINTF(WM_DEBUG_RX,
   8216 		    ("%s: RX: have entire packet, len -> %d\n",
   8217 		    device_xname(sc->sc_dev), len));
   8218 
   8219 		/* If an error occurred, update stats and drop the packet. */
   8220 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8221 			m_freem(m);
   8222 			continue;
   8223 		}
   8224 
   8225 		/* No errors.  Receive the packet. */
   8226 		m_set_rcvif(m, ifp);
   8227 		m->m_pkthdr.len = len;
   8228 		/*
    8229 		 * TODO:
    8230 		 * Save the rsshash and rsstype in this mbuf.
   8231 		 */
   8232 		DPRINTF(WM_DEBUG_RX,
   8233 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8234 			device_xname(sc->sc_dev), rsstype, rsshash));
   8235 
   8236 		/*
   8237 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8238 		 * for us.  Associate the tag with the packet.
   8239 		 */
   8240 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8241 			continue;
   8242 
   8243 		/* Set up checksum info for this packet. */
   8244 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8245 		/*
    8246 		 * Update the receive pointer while holding rxq_lock,
    8247 		 * consistent with the counter updates.
   8248 		 */
   8249 		rxq->rxq_ptr = i;
   8250 		rxq->rxq_packets++;
   8251 		rxq->rxq_bytes += len;
   8252 		mutex_exit(rxq->rxq_lock);
   8253 
   8254 		/* Pass it on. */
   8255 		if_percpuq_enqueue(sc->sc_ipq, m);
   8256 
   8257 		mutex_enter(rxq->rxq_lock);
   8258 
   8259 		if (rxq->rxq_stopping)
   8260 			break;
   8261 	}
   8262 
   8263 	if (count != 0)
   8264 		rnd_add_uint32(&sc->rnd_source, count);
   8265 
   8266 	DPRINTF(WM_DEBUG_RX,
   8267 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8268 }
   8269 
   8270 /*
   8271  * wm_linkintr_gmii:
   8272  *
   8273  *	Helper; handle link interrupts for GMII.
   8274  */
   8275 static void
   8276 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8277 {
   8278 
   8279 	KASSERT(WM_CORE_LOCKED(sc));
   8280 
   8281 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8282 		__func__));
   8283 
   8284 	if (icr & ICR_LSC) {
   8285 		uint32_t reg;
   8286 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8287 
   8288 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8289 			wm_gig_downshift_workaround_ich8lan(sc);
   8290 
   8291 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8292 			device_xname(sc->sc_dev)));
   8293 		mii_pollstat(&sc->sc_mii);
   8294 		if (sc->sc_type == WM_T_82543) {
   8295 			int miistatus, active;
   8296 
   8297 			/*
   8298 			 * With 82543, we need to force speed and
   8299 			 * duplex on the MAC equal to what the PHY
   8300 			 * speed and duplex configuration is.
   8301 			 */
   8302 			miistatus = sc->sc_mii.mii_media_status;
   8303 
   8304 			if (miistatus & IFM_ACTIVE) {
   8305 				active = sc->sc_mii.mii_media_active;
   8306 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8307 				switch (IFM_SUBTYPE(active)) {
   8308 				case IFM_10_T:
   8309 					sc->sc_ctrl |= CTRL_SPEED_10;
   8310 					break;
   8311 				case IFM_100_TX:
   8312 					sc->sc_ctrl |= CTRL_SPEED_100;
   8313 					break;
   8314 				case IFM_1000_T:
   8315 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8316 					break;
   8317 				default:
   8318 					/*
   8319 					 * fiber?
    8320 					 * Should not enter here.
   8321 					 */
   8322 					printf("unknown media (%x)\n", active);
   8323 					break;
   8324 				}
   8325 				if (active & IFM_FDX)
   8326 					sc->sc_ctrl |= CTRL_FD;
   8327 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8328 			}
   8329 		} else if ((sc->sc_type == WM_T_ICH8)
   8330 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8331 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8332 		} else if (sc->sc_type == WM_T_PCH) {
   8333 			wm_k1_gig_workaround_hv(sc,
   8334 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8335 		}
   8336 
   8337 		if ((sc->sc_phytype == WMPHY_82578)
   8338 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8339 			== IFM_1000_T)) {
   8340 
   8341 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8342 				delay(200*1000); /* XXX too big */
   8343 
   8344 				/* Link stall fix for link up */
   8345 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8346 				    HV_MUX_DATA_CTRL,
   8347 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8348 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8349 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8350 				    HV_MUX_DATA_CTRL,
   8351 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8352 			}
   8353 		}
   8354 		/*
   8355 		 * I217 Packet Loss issue:
   8356 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8357 		 * on power up.
   8358 		 * Set the Beacon Duration for I217 to 8 usec
   8359 		 */
   8360 		if ((sc->sc_type == WM_T_PCH_LPT)
   8361 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8362 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8363 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8364 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8365 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8366 		}
   8367 
   8368 		/* XXX Work-around I218 hang issue */
   8369 		/* e1000_k1_workaround_lpt_lp() */
   8370 
   8371 		if ((sc->sc_type == WM_T_PCH_LPT)
   8372 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8373 			/*
   8374 			 * Set platform power management values for Latency
   8375 			 * Tolerance Reporting (LTR)
   8376 			 */
   8377 			wm_platform_pm_pch_lpt(sc,
   8378 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8379 				    != 0));
   8380 		}
   8381 
   8382 		/* FEXTNVM6 K1-off workaround */
   8383 		if (sc->sc_type == WM_T_PCH_SPT) {
   8384 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8385 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8386 			    & FEXTNVM6_K1_OFF_ENABLE)
   8387 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8388 			else
   8389 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8390 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8391 		}
   8392 	} else if (icr & ICR_RXSEQ) {
   8393 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8394 			device_xname(sc->sc_dev)));
   8395 	}
   8396 }
   8397 
   8398 /*
   8399  * wm_linkintr_tbi:
   8400  *
   8401  *	Helper; handle link interrupts for TBI mode.
   8402  */
   8403 static void
   8404 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8405 {
   8406 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8407 	uint32_t status;
   8408 
   8409 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8410 		__func__));
   8411 
   8412 	status = CSR_READ(sc, WMREG_STATUS);
   8413 	if (icr & ICR_LSC) {
   8414 		if (status & STATUS_LU) {
   8415 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8416 			    device_xname(sc->sc_dev),
   8417 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8418 			/*
   8419 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8420 			 * so we should update sc->sc_ctrl
   8421 			 */
   8422 
   8423 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8424 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8425 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8426 			if (status & STATUS_FD)
   8427 				sc->sc_tctl |=
   8428 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8429 			else
   8430 				sc->sc_tctl |=
   8431 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8432 			if (sc->sc_ctrl & CTRL_TFCE)
   8433 				sc->sc_fcrtl |= FCRTL_XONE;
   8434 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8435 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8436 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8437 				      sc->sc_fcrtl);
   8438 			sc->sc_tbi_linkup = 1;
   8439 			if_link_state_change(ifp, LINK_STATE_UP);
   8440 		} else {
   8441 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8442 			    device_xname(sc->sc_dev)));
   8443 			sc->sc_tbi_linkup = 0;
   8444 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8445 		}
   8446 		/* Update LED */
   8447 		wm_tbi_serdes_set_linkled(sc);
   8448 	} else if (icr & ICR_RXSEQ) {
   8449 		DPRINTF(WM_DEBUG_LINK,
   8450 		    ("%s: LINK: Receive sequence error\n",
   8451 		    device_xname(sc->sc_dev)));
   8452 	}
   8453 }
   8454 
   8455 /*
   8456  * wm_linkintr_serdes:
   8457  *
    8458  *	Helper; handle link interrupts for SERDES mode.
   8459  */
   8460 static void
   8461 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8462 {
   8463 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8464 	struct mii_data *mii = &sc->sc_mii;
   8465 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8466 	uint32_t pcs_adv, pcs_lpab, reg;
   8467 
   8468 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8469 		__func__));
   8470 
   8471 	if (icr & ICR_LSC) {
   8472 		/* Check PCS */
   8473 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8474 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8475 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8476 				device_xname(sc->sc_dev)));
   8477 			mii->mii_media_status |= IFM_ACTIVE;
   8478 			sc->sc_tbi_linkup = 1;
   8479 			if_link_state_change(ifp, LINK_STATE_UP);
   8480 		} else {
   8481 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8482 				device_xname(sc->sc_dev)));
   8483 			mii->mii_media_status |= IFM_NONE;
   8484 			sc->sc_tbi_linkup = 0;
   8485 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8486 			wm_tbi_serdes_set_linkled(sc);
   8487 			return;
   8488 		}
   8489 		mii->mii_media_active |= IFM_1000_SX;
   8490 		if ((reg & PCS_LSTS_FDX) != 0)
   8491 			mii->mii_media_active |= IFM_FDX;
   8492 		else
   8493 			mii->mii_media_active |= IFM_HDX;
   8494 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8495 			/* Check flow */
   8496 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8497 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8498 				DPRINTF(WM_DEBUG_LINK,
   8499 				    ("XXX LINKOK but not ACOMP\n"));
   8500 				return;
   8501 			}
   8502 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8503 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8504 			DPRINTF(WM_DEBUG_LINK,
   8505 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
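         			/*
         			 * Resolve flow control from the local and
         			 * link partner pause advertisements
         			 * (symmetric/asymmetric pause resolution).
         			 */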
   8506 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8507 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8508 				mii->mii_media_active |= IFM_FLOW
   8509 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8510 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8511 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8512 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8513 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8514 				mii->mii_media_active |= IFM_FLOW
   8515 				    | IFM_ETH_TXPAUSE;
   8516 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8517 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8518 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8519 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8520 				mii->mii_media_active |= IFM_FLOW
   8521 				    | IFM_ETH_RXPAUSE;
   8522 		}
   8523 		/* Update LED */
   8524 		wm_tbi_serdes_set_linkled(sc);
   8525 	} else {
   8526 		DPRINTF(WM_DEBUG_LINK,
   8527 		    ("%s: LINK: Receive sequence error\n",
   8528 		    device_xname(sc->sc_dev)));
   8529 	}
   8530 }
   8531 
   8532 /*
   8533  * wm_linkintr:
   8534  *
   8535  *	Helper; handle link interrupts.
   8536  */
   8537 static void
   8538 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8539 {
   8540 
   8541 	KASSERT(WM_CORE_LOCKED(sc));
   8542 
   8543 	if (sc->sc_flags & WM_F_HAS_MII)
   8544 		wm_linkintr_gmii(sc, icr);
   8545 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8546 	    && (sc->sc_type >= WM_T_82575))
   8547 		wm_linkintr_serdes(sc, icr);
   8548 	else
   8549 		wm_linkintr_tbi(sc, icr);
   8550 }
   8551 
   8552 /*
   8553  * wm_intr_legacy:
   8554  *
   8555  *	Interrupt service routine for INTx and MSI.
   8556  */
   8557 static int
   8558 wm_intr_legacy(void *arg)
   8559 {
   8560 	struct wm_softc *sc = arg;
   8561 	struct wm_queue *wmq = &sc->sc_queue[0];
   8562 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8563 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8564 	uint32_t icr, rndval = 0;
   8565 	int handled = 0;
   8566 
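         	/*
         	 * Reading ICR acknowledges (clears) the asserted bits, so
         	 * keep looping until no interrupt cause of interest remains.
         	 */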
   8567 	while (1 /* CONSTCOND */) {
   8568 		icr = CSR_READ(sc, WMREG_ICR);
   8569 		if ((icr & sc->sc_icr) == 0)
   8570 			break;
   8571 		if (handled == 0) {
   8572 			DPRINTF(WM_DEBUG_TX,
   8573 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8574 		}
   8575 		if (rndval == 0)
   8576 			rndval = icr;
   8577 
   8578 		mutex_enter(rxq->rxq_lock);
   8579 
   8580 		if (rxq->rxq_stopping) {
   8581 			mutex_exit(rxq->rxq_lock);
   8582 			break;
   8583 		}
   8584 
   8585 		handled = 1;
   8586 
   8587 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8588 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8589 			DPRINTF(WM_DEBUG_RX,
   8590 			    ("%s: RX: got Rx intr 0x%08x\n",
   8591 			    device_xname(sc->sc_dev),
   8592 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8593 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8594 		}
   8595 #endif
   8596 		wm_rxeof(rxq, UINT_MAX);
   8597 
   8598 		mutex_exit(rxq->rxq_lock);
   8599 		mutex_enter(txq->txq_lock);
   8600 
   8601 		if (txq->txq_stopping) {
   8602 			mutex_exit(txq->txq_lock);
   8603 			break;
   8604 		}
   8605 
   8606 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8607 		if (icr & ICR_TXDW) {
   8608 			DPRINTF(WM_DEBUG_TX,
   8609 			    ("%s: TX: got TXDW interrupt\n",
   8610 			    device_xname(sc->sc_dev)));
   8611 			WM_Q_EVCNT_INCR(txq, txdw);
   8612 		}
   8613 #endif
   8614 		wm_txeof(sc, txq);
   8615 
   8616 		mutex_exit(txq->txq_lock);
   8617 		WM_CORE_LOCK(sc);
   8618 
   8619 		if (sc->sc_core_stopping) {
   8620 			WM_CORE_UNLOCK(sc);
   8621 			break;
   8622 		}
   8623 
   8624 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8625 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8626 			wm_linkintr(sc, icr);
   8627 		}
   8628 
   8629 		WM_CORE_UNLOCK(sc);
   8630 
   8631 		if (icr & ICR_RXO) {
   8632 #if defined(WM_DEBUG)
   8633 			log(LOG_WARNING, "%s: Receive overrun\n",
   8634 			    device_xname(sc->sc_dev));
   8635 #endif /* defined(WM_DEBUG) */
   8636 		}
   8637 	}
   8638 
   8639 	rnd_add_uint32(&sc->rnd_source, rndval);
   8640 
   8641 	if (handled) {
   8642 		/* Try to get more packets going. */
   8643 		softint_schedule(wmq->wmq_si);
   8644 	}
   8645 
   8646 	return handled;
   8647 }
   8648 
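         /*
          * Per-queue interrupt masking differs by device: the 82574 masks
          * via IMC/IMS using per-queue ICR bits, the 82575 via EIMC/EIMS
          * using EITR queue bits, and newer MSI-X devices use one
          * EIMC/EIMS bit per interrupt vector.
          */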
   8649 static inline void
   8650 wm_txrxintr_disable(struct wm_queue *wmq)
   8651 {
   8652 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8653 
   8654 	if (sc->sc_type == WM_T_82574)
   8655 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8656 	else if (sc->sc_type == WM_T_82575)
   8657 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8658 	else
   8659 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8660 }
   8661 
   8662 static inline void
   8663 wm_txrxintr_enable(struct wm_queue *wmq)
   8664 {
   8665 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8666 
   8667 	wm_itrs_calculate(sc, wmq);
   8668 
   8669 	if (sc->sc_type == WM_T_82574)
   8670 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8671 	else if (sc->sc_type == WM_T_82575)
   8672 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8673 	else
   8674 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8675 }
   8676 
   8677 static int
   8678 wm_txrxintr_msix(void *arg)
   8679 {
   8680 	struct wm_queue *wmq = arg;
   8681 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8682 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8683 	struct wm_softc *sc = txq->txq_sc;
   8684 	u_int limit = sc->sc_rx_intr_process_limit;
   8685 
   8686 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8687 
   8688 	DPRINTF(WM_DEBUG_TX,
   8689 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8690 
   8691 	wm_txrxintr_disable(wmq);
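         	/*
         	 * The queue's interrupt stays masked while the rings are
         	 * serviced; wm_handle_queue(), scheduled below, re-enables
         	 * it via wm_txrxintr_enable() when it finishes.
         	 */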
   8692 
   8693 	mutex_enter(txq->txq_lock);
   8694 
   8695 	if (txq->txq_stopping) {
   8696 		mutex_exit(txq->txq_lock);
   8697 		return 0;
   8698 	}
   8699 
   8700 	WM_Q_EVCNT_INCR(txq, txdw);
   8701 	wm_txeof(sc, txq);
   8702 	/* wm_deferred start() is done in wm_handle_queue(). */
   8703 	mutex_exit(txq->txq_lock);
   8704 
   8705 	DPRINTF(WM_DEBUG_RX,
   8706 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8707 	mutex_enter(rxq->rxq_lock);
   8708 
   8709 	if (rxq->rxq_stopping) {
   8710 		mutex_exit(rxq->rxq_lock);
   8711 		return 0;
   8712 	}
   8713 
   8714 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8715 	wm_rxeof(rxq, limit);
   8716 	mutex_exit(rxq->rxq_lock);
   8717 
   8718 	wm_itrs_writereg(sc, wmq);
   8719 
   8720 	softint_schedule(wmq->wmq_si);
   8721 
   8722 	return 1;
   8723 }
   8724 
   8725 static void
   8726 wm_handle_queue(void *arg)
   8727 {
   8728 	struct wm_queue *wmq = arg;
   8729 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8730 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8731 	struct wm_softc *sc = txq->txq_sc;
   8732 	u_int limit = sc->sc_rx_process_limit;
   8733 
   8734 	mutex_enter(txq->txq_lock);
   8735 	if (txq->txq_stopping) {
   8736 		mutex_exit(txq->txq_lock);
   8737 		return;
   8738 	}
   8739 	wm_txeof(sc, txq);
   8740 	wm_deferred_start_locked(txq);
   8741 	mutex_exit(txq->txq_lock);
   8742 
   8743 	mutex_enter(rxq->rxq_lock);
   8744 	if (rxq->rxq_stopping) {
   8745 		mutex_exit(rxq->rxq_lock);
   8746 		return;
   8747 	}
   8748 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8749 	wm_rxeof(rxq, limit);
   8750 	mutex_exit(rxq->rxq_lock);
   8751 
   8752 	wm_txrxintr_enable(wmq);
   8753 }
   8754 
   8755 /*
   8756  * wm_linkintr_msix:
   8757  *
   8758  *	Interrupt service routine for link status change for MSI-X.
   8759  */
   8760 static int
   8761 wm_linkintr_msix(void *arg)
   8762 {
   8763 	struct wm_softc *sc = arg;
   8764 	uint32_t reg;
   8765 
   8766 	DPRINTF(WM_DEBUG_LINK,
   8767 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8768 
   8769 	reg = CSR_READ(sc, WMREG_ICR);
   8770 	WM_CORE_LOCK(sc);
   8771 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8772 		goto out;
   8773 
   8774 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8775 	wm_linkintr(sc, ICR_LSC);
   8776 
   8777 out:
   8778 	WM_CORE_UNLOCK(sc);
   8779 
   8780 	if (sc->sc_type == WM_T_82574)
   8781 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8782 	else if (sc->sc_type == WM_T_82575)
   8783 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8784 	else
   8785 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8786 
   8787 	return 1;
   8788 }
   8789 
   8790 /*
   8791  * Media related.
   8792  * GMII, SGMII, TBI (and SERDES)
   8793  */
   8794 
   8795 /* Common */
   8796 
   8797 /*
   8798  * wm_tbi_serdes_set_linkled:
   8799  *
   8800  *	Update the link LED on TBI and SERDES devices.
   8801  */
   8802 static void
   8803 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8804 {
   8805 
   8806 	if (sc->sc_tbi_linkup)
   8807 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8808 	else
   8809 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8810 
   8811 	/* 82540 or newer devices are active low */
   8812 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8813 
   8814 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8815 }
   8816 
   8817 /* GMII related */
   8818 
   8819 /*
   8820  * wm_gmii_reset:
   8821  *
   8822  *	Reset the PHY.
   8823  */
   8824 static void
   8825 wm_gmii_reset(struct wm_softc *sc)
   8826 {
   8827 	uint32_t reg;
   8828 	int rv;
   8829 
   8830 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8831 		device_xname(sc->sc_dev), __func__));
   8832 
   8833 	rv = sc->phy.acquire(sc);
   8834 	if (rv != 0) {
   8835 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8836 		    __func__);
   8837 		return;
   8838 	}
   8839 
   8840 	switch (sc->sc_type) {
   8841 	case WM_T_82542_2_0:
   8842 	case WM_T_82542_2_1:
   8843 		/* null */
   8844 		break;
   8845 	case WM_T_82543:
   8846 		/*
   8847 		 * With 82543, we need to force speed and duplex on the MAC
   8848 		 * equal to what the PHY speed and duplex configuration is.
   8849 		 * In addition, we need to perform a hardware reset on the PHY
   8850 		 * to take it out of reset.
   8851 		 */
   8852 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8853 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8854 
   8855 		/* The PHY reset pin is active-low. */
   8856 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8857 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8858 		    CTRL_EXT_SWDPIN(4));
   8859 		reg |= CTRL_EXT_SWDPIO(4);
   8860 
   8861 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8862 		CSR_WRITE_FLUSH(sc);
   8863 		delay(10*1000);
   8864 
   8865 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8866 		CSR_WRITE_FLUSH(sc);
   8867 		delay(150);
   8868 #if 0
   8869 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8870 #endif
   8871 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8872 		break;
   8873 	case WM_T_82544:	/* reset 10000us */
   8874 	case WM_T_82540:
   8875 	case WM_T_82545:
   8876 	case WM_T_82545_3:
   8877 	case WM_T_82546:
   8878 	case WM_T_82546_3:
   8879 	case WM_T_82541:
   8880 	case WM_T_82541_2:
   8881 	case WM_T_82547:
   8882 	case WM_T_82547_2:
   8883 	case WM_T_82571:	/* reset 100us */
   8884 	case WM_T_82572:
   8885 	case WM_T_82573:
   8886 	case WM_T_82574:
   8887 	case WM_T_82575:
   8888 	case WM_T_82576:
   8889 	case WM_T_82580:
   8890 	case WM_T_I350:
   8891 	case WM_T_I354:
   8892 	case WM_T_I210:
   8893 	case WM_T_I211:
   8894 	case WM_T_82583:
   8895 	case WM_T_80003:
   8896 		/* generic reset */
   8897 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8898 		CSR_WRITE_FLUSH(sc);
   8899 		delay(20000);
   8900 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8901 		CSR_WRITE_FLUSH(sc);
   8902 		delay(20000);
   8903 
   8904 		if ((sc->sc_type == WM_T_82541)
   8905 		    || (sc->sc_type == WM_T_82541_2)
   8906 		    || (sc->sc_type == WM_T_82547)
   8907 		    || (sc->sc_type == WM_T_82547_2)) {
    8908 			/* Workarounds for IGP are done in igp_reset() */
   8909 			/* XXX add code to set LED after phy reset */
   8910 		}
   8911 		break;
   8912 	case WM_T_ICH8:
   8913 	case WM_T_ICH9:
   8914 	case WM_T_ICH10:
   8915 	case WM_T_PCH:
   8916 	case WM_T_PCH2:
   8917 	case WM_T_PCH_LPT:
   8918 	case WM_T_PCH_SPT:
   8919 		/* generic reset */
   8920 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8921 		CSR_WRITE_FLUSH(sc);
   8922 		delay(100);
   8923 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8924 		CSR_WRITE_FLUSH(sc);
   8925 		delay(150);
   8926 		break;
   8927 	default:
   8928 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8929 		    __func__);
   8930 		break;
   8931 	}
   8932 
   8933 	sc->phy.release(sc);
   8934 
   8935 	/* get_cfg_done */
   8936 	wm_get_cfg_done(sc);
   8937 
   8938 	/* extra setup */
   8939 	switch (sc->sc_type) {
   8940 	case WM_T_82542_2_0:
   8941 	case WM_T_82542_2_1:
   8942 	case WM_T_82543:
   8943 	case WM_T_82544:
   8944 	case WM_T_82540:
   8945 	case WM_T_82545:
   8946 	case WM_T_82545_3:
   8947 	case WM_T_82546:
   8948 	case WM_T_82546_3:
   8949 	case WM_T_82541_2:
   8950 	case WM_T_82547_2:
   8951 	case WM_T_82571:
   8952 	case WM_T_82572:
   8953 	case WM_T_82573:
   8954 	case WM_T_82575:
   8955 	case WM_T_82576:
   8956 	case WM_T_82580:
   8957 	case WM_T_I350:
   8958 	case WM_T_I354:
   8959 	case WM_T_I210:
   8960 	case WM_T_I211:
   8961 	case WM_T_80003:
   8962 		/* null */
   8963 		break;
   8964 	case WM_T_82574:
   8965 	case WM_T_82583:
   8966 		wm_lplu_d0_disable(sc);
   8967 		break;
   8968 	case WM_T_82541:
   8969 	case WM_T_82547:
   8970 		/* XXX Configure actively LED after PHY reset */
   8971 		break;
   8972 	case WM_T_ICH8:
   8973 	case WM_T_ICH9:
   8974 	case WM_T_ICH10:
   8975 	case WM_T_PCH:
   8976 	case WM_T_PCH2:
   8977 	case WM_T_PCH_LPT:
   8978 	case WM_T_PCH_SPT:
   8979 		wm_phy_post_reset(sc);
   8980 		break;
   8981 	default:
   8982 		panic("%s: unknown type\n", __func__);
   8983 		break;
   8984 	}
   8985 }
   8986 
   8987 /*
   8988  * Setup sc_phytype and mii_{read|write}reg.
   8989  *
    8990  *  To identify the PHY type, the correct read/write functions must be
    8991  * selected, and to select them the PCI ID or MAC type is needed without
    8992  * accessing any PHY registers.
    8993  *
    8994  *  On the first call of this function, the PHY ID is not known yet.
    8995  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    8996  * so the result might be incorrect.
    8997  *
    8998  *  On the second call, the PHY OUI and model are used to identify the
    8999  * PHY type. This might still not be perfect because the comparison
    9000  * table is incomplete, but it should be better than the first call.
    9001  *
    9002  *  If the newly detected result differs from the previous assumption,
    9003  * a diagnostic message is printed.
   9004  */
   9005 static void
   9006 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9007     uint16_t phy_model)
   9008 {
   9009 	device_t dev = sc->sc_dev;
   9010 	struct mii_data *mii = &sc->sc_mii;
   9011 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9012 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9013 	mii_readreg_t new_readreg;
   9014 	mii_writereg_t new_writereg;
   9015 
   9016 	if (mii->mii_readreg == NULL) {
   9017 		/*
   9018 		 *  This is the first call of this function. For ICH and PCH
   9019 		 * variants, it's difficult to determine the PHY access method
   9020 		 * by sc_type, so use the PCI product ID for some devices.
   9021 		 */
   9022 
   9023 		switch (sc->sc_pcidevid) {
   9024 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9025 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9026 			/* 82577 */
   9027 			new_phytype = WMPHY_82577;
   9028 			break;
   9029 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9030 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9031 			/* 82578 */
   9032 			new_phytype = WMPHY_82578;
   9033 			break;
   9034 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9035 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9036 			/* 82579 */
   9037 			new_phytype = WMPHY_82579;
   9038 			break;
   9039 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9040 		case PCI_PRODUCT_INTEL_82801I_BM:
   9041 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9042 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9043 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9044 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9045 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9046 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9047 			/* ICH8, 9, 10 with 82567 */
   9048 			new_phytype = WMPHY_BM;
   9049 			break;
   9050 		default:
   9051 			break;
   9052 		}
   9053 	} else {
   9054 		/* It's not the first call. Use PHY OUI and model */
   9055 		switch (phy_oui) {
   9056 		case MII_OUI_ATHEROS: /* XXX ??? */
   9057 			switch (phy_model) {
   9058 			case 0x0004: /* XXX */
   9059 				new_phytype = WMPHY_82578;
   9060 				break;
   9061 			default:
   9062 				break;
   9063 			}
   9064 			break;
   9065 		case MII_OUI_xxMARVELL:
   9066 			switch (phy_model) {
   9067 			case MII_MODEL_xxMARVELL_I210:
   9068 				new_phytype = WMPHY_I210;
   9069 				break;
   9070 			case MII_MODEL_xxMARVELL_E1011:
   9071 			case MII_MODEL_xxMARVELL_E1000_3:
   9072 			case MII_MODEL_xxMARVELL_E1000_5:
   9073 			case MII_MODEL_xxMARVELL_E1112:
   9074 				new_phytype = WMPHY_M88;
   9075 				break;
   9076 			case MII_MODEL_xxMARVELL_E1149:
   9077 				new_phytype = WMPHY_BM;
   9078 				break;
   9079 			case MII_MODEL_xxMARVELL_E1111:
   9080 			case MII_MODEL_xxMARVELL_I347:
   9081 			case MII_MODEL_xxMARVELL_E1512:
   9082 			case MII_MODEL_xxMARVELL_E1340M:
   9083 			case MII_MODEL_xxMARVELL_E1543:
   9084 				new_phytype = WMPHY_M88;
   9085 				break;
   9086 			case MII_MODEL_xxMARVELL_I82563:
   9087 				new_phytype = WMPHY_GG82563;
   9088 				break;
   9089 			default:
   9090 				break;
   9091 			}
   9092 			break;
   9093 		case MII_OUI_INTEL:
   9094 			switch (phy_model) {
   9095 			case MII_MODEL_INTEL_I82577:
   9096 				new_phytype = WMPHY_82577;
   9097 				break;
   9098 			case MII_MODEL_INTEL_I82579:
   9099 				new_phytype = WMPHY_82579;
   9100 				break;
   9101 			case MII_MODEL_INTEL_I217:
   9102 				new_phytype = WMPHY_I217;
   9103 				break;
   9104 			case MII_MODEL_INTEL_I82580:
   9105 			case MII_MODEL_INTEL_I350:
   9106 				new_phytype = WMPHY_82580;
   9107 				break;
   9108 			default:
   9109 				break;
   9110 			}
   9111 			break;
   9112 		case MII_OUI_yyINTEL:
   9113 			switch (phy_model) {
   9114 			case MII_MODEL_yyINTEL_I82562G:
   9115 			case MII_MODEL_yyINTEL_I82562EM:
   9116 			case MII_MODEL_yyINTEL_I82562ET:
   9117 				new_phytype = WMPHY_IFE;
   9118 				break;
   9119 			case MII_MODEL_yyINTEL_IGP01E1000:
   9120 				new_phytype = WMPHY_IGP;
   9121 				break;
   9122 			case MII_MODEL_yyINTEL_I82566:
   9123 				new_phytype = WMPHY_IGP_3;
   9124 				break;
   9125 			default:
   9126 				break;
   9127 			}
   9128 			break;
   9129 		default:
   9130 			break;
   9131 		}
   9132 		if (new_phytype == WMPHY_UNKNOWN)
   9133 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9134 			    __func__);
   9135 
   9136 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9137 	    && (sc->sc_phytype != new_phytype)) {
    9138 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9139 			    " was incorrect. PHY type from PHY ID = %u\n",
   9140 			    sc->sc_phytype, new_phytype);
   9141 		}
   9142 	}
   9143 
   9144 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9145 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9146 		/* SGMII */
   9147 		new_readreg = wm_sgmii_readreg;
   9148 		new_writereg = wm_sgmii_writereg;
    9149 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9150 		/* BM2 (phyaddr == 1) */
   9151 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9152 		    && (new_phytype != WMPHY_BM)
   9153 		    && (new_phytype != WMPHY_UNKNOWN))
   9154 			doubt_phytype = new_phytype;
   9155 		new_phytype = WMPHY_BM;
   9156 		new_readreg = wm_gmii_bm_readreg;
   9157 		new_writereg = wm_gmii_bm_writereg;
   9158 	} else if (sc->sc_type >= WM_T_PCH) {
   9159 		/* All PCH* use _hv_ */
   9160 		new_readreg = wm_gmii_hv_readreg;
   9161 		new_writereg = wm_gmii_hv_writereg;
   9162 	} else if (sc->sc_type >= WM_T_ICH8) {
   9163 		/* non-82567 ICH8, 9 and 10 */
   9164 		new_readreg = wm_gmii_i82544_readreg;
   9165 		new_writereg = wm_gmii_i82544_writereg;
   9166 	} else if (sc->sc_type >= WM_T_80003) {
   9167 		/* 80003 */
   9168 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9169 		    && (new_phytype != WMPHY_GG82563)
   9170 		    && (new_phytype != WMPHY_UNKNOWN))
   9171 			doubt_phytype = new_phytype;
   9172 		new_phytype = WMPHY_GG82563;
   9173 		new_readreg = wm_gmii_i80003_readreg;
   9174 		new_writereg = wm_gmii_i80003_writereg;
   9175 	} else if (sc->sc_type >= WM_T_I210) {
   9176 		/* I210 and I211 */
   9177 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9178 		    && (new_phytype != WMPHY_I210)
   9179 		    && (new_phytype != WMPHY_UNKNOWN))
   9180 			doubt_phytype = new_phytype;
   9181 		new_phytype = WMPHY_I210;
   9182 		new_readreg = wm_gmii_gs40g_readreg;
   9183 		new_writereg = wm_gmii_gs40g_writereg;
   9184 	} else if (sc->sc_type >= WM_T_82580) {
   9185 		/* 82580, I350 and I354 */
   9186 		new_readreg = wm_gmii_82580_readreg;
   9187 		new_writereg = wm_gmii_82580_writereg;
   9188 	} else if (sc->sc_type >= WM_T_82544) {
    9189 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9190 		new_readreg = wm_gmii_i82544_readreg;
   9191 		new_writereg = wm_gmii_i82544_writereg;
   9192 	} else {
   9193 		new_readreg = wm_gmii_i82543_readreg;
   9194 		new_writereg = wm_gmii_i82543_writereg;
   9195 	}
   9196 
   9197 	if (new_phytype == WMPHY_BM) {
   9198 		/* All BM use _bm_ */
   9199 		new_readreg = wm_gmii_bm_readreg;
   9200 		new_writereg = wm_gmii_bm_writereg;
   9201 	}
   9202 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9203 		/* All PCH* use _hv_ */
   9204 		new_readreg = wm_gmii_hv_readreg;
   9205 		new_writereg = wm_gmii_hv_writereg;
   9206 	}
   9207 
   9208 	/* Diag output */
   9209 	if (doubt_phytype != WMPHY_UNKNOWN)
   9210 		aprint_error_dev(dev, "Assumed new PHY type was "
   9211 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9212 		    new_phytype);
   9213 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9214 	    && (sc->sc_phytype != new_phytype))
    9215 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9216 		    " was incorrect. New PHY type = %u\n",
   9217 		    sc->sc_phytype, new_phytype);
   9218 
   9219 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9220 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9221 
   9222 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9223 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9224 		    "function was incorrect.\n");
   9225 
   9226 	/* Update now */
   9227 	sc->sc_phytype = new_phytype;
   9228 	mii->mii_readreg = new_readreg;
   9229 	mii->mii_writereg = new_writereg;
   9230 }
   9231 
   9232 /*
   9233  * wm_get_phy_id_82575:
   9234  *
   9235  * Return PHY ID. Return -1 if it failed.
   9236  */
   9237 static int
   9238 wm_get_phy_id_82575(struct wm_softc *sc)
   9239 {
   9240 	uint32_t reg;
   9241 	int phyid = -1;
   9242 
   9243 	/* XXX */
   9244 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9245 		return -1;
   9246 
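         	/*
         	 * When the SGMII PHY is reached over MDIO, the PHY address
         	 * is taken from the MDIC register (82575/82576) or the
         	 * MDICNFG register (82580 and later).
         	 */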
   9247 	if (wm_sgmii_uses_mdio(sc)) {
   9248 		switch (sc->sc_type) {
   9249 		case WM_T_82575:
   9250 		case WM_T_82576:
   9251 			reg = CSR_READ(sc, WMREG_MDIC);
   9252 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9253 			break;
   9254 		case WM_T_82580:
   9255 		case WM_T_I350:
   9256 		case WM_T_I354:
   9257 		case WM_T_I210:
   9258 		case WM_T_I211:
   9259 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9260 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9261 			break;
   9262 		default:
   9263 			return -1;
   9264 		}
   9265 	}
   9266 
   9267 	return phyid;
   9268 }
   9269 
   9270 
   9271 /*
   9272  * wm_gmii_mediainit:
   9273  *
   9274  *	Initialize media for use on 1000BASE-T devices.
   9275  */
   9276 static void
   9277 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9278 {
   9279 	device_t dev = sc->sc_dev;
   9280 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9281 	struct mii_data *mii = &sc->sc_mii;
   9282 	uint32_t reg;
   9283 
   9284 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9285 		device_xname(sc->sc_dev), __func__));
   9286 
   9287 	/* We have GMII. */
   9288 	sc->sc_flags |= WM_F_HAS_MII;
   9289 
   9290 	if (sc->sc_type == WM_T_80003)
    9291 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9292 	else
   9293 		sc->sc_tipg = TIPG_1000T_DFLT;
   9294 
   9295 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9296 	if ((sc->sc_type == WM_T_82580)
   9297 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9298 	    || (sc->sc_type == WM_T_I211)) {
   9299 		reg = CSR_READ(sc, WMREG_PHPM);
   9300 		reg &= ~PHPM_GO_LINK_D;
   9301 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9302 	}
   9303 
   9304 	/*
   9305 	 * Let the chip set speed/duplex on its own based on
   9306 	 * signals from the PHY.
   9307 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9308 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9309 	 */
   9310 	sc->sc_ctrl |= CTRL_SLU;
   9311 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9312 
   9313 	/* Initialize our media structures and probe the GMII. */
   9314 	mii->mii_ifp = ifp;
   9315 
   9316 	/*
    9317 	 * The first call of wm_gmii_setup_phytype. The result might be
   9318 	 * incorrect.
   9319 	 */
   9320 	wm_gmii_setup_phytype(sc, 0, 0);
   9321 
   9322 	mii->mii_statchg = wm_gmii_statchg;
   9323 
   9324 	/* get PHY control from SMBus to PCIe */
   9325 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9326 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9327 		wm_smbustopci(sc);
   9328 
   9329 	wm_gmii_reset(sc);
   9330 
   9331 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9332 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9333 	    wm_gmii_mediastatus);
   9334 
   9335 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9336 	    || (sc->sc_type == WM_T_82580)
   9337 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9338 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9339 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9340 			/* Attach only one port */
   9341 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9342 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9343 		} else {
   9344 			int i, id;
   9345 			uint32_t ctrl_ext;
   9346 
   9347 			id = wm_get_phy_id_82575(sc);
   9348 			if (id != -1) {
   9349 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9350 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9351 			}
   9352 			if ((id == -1)
   9353 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9354 				/* Power on the SGMII PHY if it is disabled */
   9355 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9356 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9357 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9358 				CSR_WRITE_FLUSH(sc);
   9359 				delay(300*1000); /* XXX too long */
   9360 
    9361 				/* Try PHY addresses 1 through 7 */
   9362 				for (i = 1; i < 8; i++)
   9363 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9364 					    0xffffffff, i, MII_OFFSET_ANY,
   9365 					    MIIF_DOPAUSE);
   9366 
   9367 				/* restore previous sfp cage power state */
   9368 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9369 			}
   9370 		}
   9371 	} else {
   9372 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9373 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9374 	}
   9375 
   9376 	/*
   9377 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9378 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9379 	 */
   9380 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9381 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9382 		wm_set_mdio_slow_mode_hv(sc);
   9383 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9384 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9385 	}
   9386 
   9387 	/*
   9388 	 * (For ICH8 variants)
   9389 	 * If PHY detection failed, use BM's r/w function and retry.
   9390 	 */
   9391 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9392 		/* if failed, retry with *_bm_* */
   9393 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9394 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9395 		    sc->sc_phytype);
   9396 		sc->sc_phytype = WMPHY_BM;
   9397 		mii->mii_readreg = wm_gmii_bm_readreg;
   9398 		mii->mii_writereg = wm_gmii_bm_writereg;
   9399 
   9400 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9401 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9402 	}
   9403 
   9404 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9405 		/* No PHY was found */
   9406 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9407 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9408 		sc->sc_phytype = WMPHY_NONE;
   9409 	} else {
   9410 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9411 
   9412 		/*
    9413 		 * PHY found! Check the PHY type again with the second
    9414 		 * call of wm_gmii_setup_phytype.
   9415 		 */
   9416 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9417 		    child->mii_mpd_model);
   9418 
   9419 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9420 	}
   9421 }
   9422 
   9423 /*
   9424  * wm_gmii_mediachange:	[ifmedia interface function]
   9425  *
   9426  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9427  */
   9428 static int
   9429 wm_gmii_mediachange(struct ifnet *ifp)
   9430 {
   9431 	struct wm_softc *sc = ifp->if_softc;
   9432 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9433 	int rc;
   9434 
   9435 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9436 		device_xname(sc->sc_dev), __func__));
   9437 	if ((ifp->if_flags & IFF_UP) == 0)
   9438 		return 0;
   9439 
   9440 	/* Disable D0 LPLU. */
   9441 	if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   9442 		wm_lplu_d0_disable_pch(sc);
   9443 	else
   9444 		wm_lplu_d0_disable(sc);	/* ICH* */
   9445 
   9446 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9447 	sc->sc_ctrl |= CTRL_SLU;
   9448 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9449 	    || (sc->sc_type > WM_T_82543)) {
   9450 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9451 	} else {
   9452 		sc->sc_ctrl &= ~CTRL_ASDE;
   9453 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9454 		if (ife->ifm_media & IFM_FDX)
   9455 			sc->sc_ctrl |= CTRL_FD;
   9456 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9457 		case IFM_10_T:
   9458 			sc->sc_ctrl |= CTRL_SPEED_10;
   9459 			break;
   9460 		case IFM_100_TX:
   9461 			sc->sc_ctrl |= CTRL_SPEED_100;
   9462 			break;
   9463 		case IFM_1000_T:
   9464 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9465 			break;
   9466 		default:
   9467 			panic("wm_gmii_mediachange: bad media 0x%x",
   9468 			    ife->ifm_media);
   9469 		}
   9470 	}
   9471 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9472 	CSR_WRITE_FLUSH(sc);
   9473 	if (sc->sc_type <= WM_T_82543)
   9474 		wm_gmii_reset(sc);
   9475 
   9476 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9477 		return 0;
   9478 	return rc;
   9479 }
   9480 
   9481 /*
   9482  * wm_gmii_mediastatus:	[ifmedia interface function]
   9483  *
   9484  *	Get the current interface media status on a 1000BASE-T device.
   9485  */
   9486 static void
   9487 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9488 {
   9489 	struct wm_softc *sc = ifp->if_softc;
   9490 
   9491 	ether_mediastatus(ifp, ifmr);
   9492 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9493 	    | sc->sc_flowflags;
   9494 }
   9495 
   9496 #define	MDI_IO		CTRL_SWDPIN(2)
   9497 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9498 #define	MDI_CLK		CTRL_SWDPIN(3)
   9499 
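         /*
          * Bit-bang an MII frame on the 82543: each bit is presented on
          * MDI_IO (MSB first) and clocked into the PHY by pulsing MDI_CLK
          * high and low with 10us settle times.
          */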
   9500 static void
   9501 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9502 {
   9503 	uint32_t i, v;
   9504 
   9505 	v = CSR_READ(sc, WMREG_CTRL);
   9506 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9507 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9508 
   9509 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9510 		if (data & i)
   9511 			v |= MDI_IO;
   9512 		else
   9513 			v &= ~MDI_IO;
   9514 		CSR_WRITE(sc, WMREG_CTRL, v);
   9515 		CSR_WRITE_FLUSH(sc);
   9516 		delay(10);
   9517 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9518 		CSR_WRITE_FLUSH(sc);
   9519 		delay(10);
   9520 		CSR_WRITE(sc, WMREG_CTRL, v);
   9521 		CSR_WRITE_FLUSH(sc);
   9522 		delay(10);
   9523 	}
   9524 }
   9525 
   9526 static uint32_t
   9527 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9528 {
   9529 	uint32_t v, i, data = 0;
   9530 
   9531 	v = CSR_READ(sc, WMREG_CTRL);
   9532 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9533 	v |= CTRL_SWDPIO(3);
   9534 
   9535 	CSR_WRITE(sc, WMREG_CTRL, v);
   9536 	CSR_WRITE_FLUSH(sc);
   9537 	delay(10);
   9538 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9539 	CSR_WRITE_FLUSH(sc);
   9540 	delay(10);
   9541 	CSR_WRITE(sc, WMREG_CTRL, v);
   9542 	CSR_WRITE_FLUSH(sc);
   9543 	delay(10);
   9544 
   9545 	for (i = 0; i < 16; i++) {
   9546 		data <<= 1;
   9547 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9548 		CSR_WRITE_FLUSH(sc);
   9549 		delay(10);
   9550 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9551 			data |= 1;
   9552 		CSR_WRITE(sc, WMREG_CTRL, v);
   9553 		CSR_WRITE_FLUSH(sc);
   9554 		delay(10);
   9555 	}
   9556 
   9557 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9558 	CSR_WRITE_FLUSH(sc);
   9559 	delay(10);
   9560 	CSR_WRITE(sc, WMREG_CTRL, v);
   9561 	CSR_WRITE_FLUSH(sc);
   9562 	delay(10);
   9563 
   9564 	return data;
   9565 }
   9566 
   9567 #undef MDI_IO
   9568 #undef MDI_DIR
   9569 #undef MDI_CLK
   9570 
   9571 /*
   9572  * wm_gmii_i82543_readreg:	[mii interface function]
   9573  *
   9574  *	Read a PHY register on the GMII (i82543 version).
   9575  */
   9576 static int
   9577 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9578 {
   9579 	struct wm_softc *sc = device_private(self);
   9580 	int rv;
   9581 
   9582 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9583 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9584 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9585 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9586 
   9587 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9588 	    device_xname(sc->sc_dev), phy, reg, rv));
   9589 
   9590 	return rv;
   9591 }
   9592 
   9593 /*
   9594  * wm_gmii_i82543_writereg:	[mii interface function]
   9595  *
   9596  *	Write a PHY register on the GMII (i82543 version).
   9597  */
   9598 static void
   9599 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9600 {
   9601 	struct wm_softc *sc = device_private(self);
   9602 
   9603 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9604 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9605 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9606 	    (MII_COMMAND_START << 30), 32);
   9607 }
   9608 
   9609 /*
   9610  * wm_gmii_mdic_readreg:	[mii interface function]
   9611  *
   9612  *	Read a PHY register on the GMII.
   9613  */
   9614 static int
   9615 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9616 {
   9617 	struct wm_softc *sc = device_private(self);
   9618 	uint32_t mdic = 0;
   9619 	int i, rv;
   9620 
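	/*
	 * Start the read and poll for MDIC_READY; the MDIC register
	 * shifts the MII frame out in hardware, so the poll budget is
	 * WM_GEN_POLL_TIMEOUT * 3 iterations of 50us each.
	 */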
   9621 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9622 	    MDIC_REGADD(reg));
   9623 
   9624 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9625 		mdic = CSR_READ(sc, WMREG_MDIC);
   9626 		if (mdic & MDIC_READY)
   9627 			break;
   9628 		delay(50);
   9629 	}
   9630 
   9631 	if ((mdic & MDIC_READY) == 0) {
   9632 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9633 		    device_xname(sc->sc_dev), phy, reg);
   9634 		rv = 0;
   9635 	} else if (mdic & MDIC_E) {
   9636 #if 0 /* This is normal if no PHY is present. */
   9637 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9638 		    device_xname(sc->sc_dev), phy, reg);
   9639 #endif
   9640 		rv = 0;
   9641 	} else {
   9642 		rv = MDIC_DATA(mdic);
   9643 		if (rv == 0xffff)
   9644 			rv = 0;
   9645 	}
   9646 
   9647 	return rv;
   9648 }
   9649 
   9650 /*
   9651  * wm_gmii_mdic_writereg:	[mii interface function]
   9652  *
   9653  *	Write a PHY register on the GMII.
   9654  */
   9655 static void
   9656 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9657 {
   9658 	struct wm_softc *sc = device_private(self);
   9659 	uint32_t mdic = 0;
   9660 	int i;
   9661 
   9662 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9663 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9664 
   9665 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9666 		mdic = CSR_READ(sc, WMREG_MDIC);
   9667 		if (mdic & MDIC_READY)
   9668 			break;
   9669 		delay(50);
   9670 	}
   9671 
   9672 	if ((mdic & MDIC_READY) == 0)
   9673 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9674 		    device_xname(sc->sc_dev), phy, reg);
   9675 	else if (mdic & MDIC_E)
   9676 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9677 		    device_xname(sc->sc_dev), phy, reg);
   9678 }
   9679 
   9680 /*
   9681  * wm_gmii_i82544_readreg:	[mii interface function]
   9682  *
   9683  *	Read a PHY register on the GMII.
   9684  */
   9685 static int
   9686 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9687 {
   9688 	struct wm_softc *sc = device_private(self);
   9689 	int rv;
   9690 
   9691 	if (sc->phy.acquire(sc)) {
   9692 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9693 		    __func__);
   9694 		return 0;
   9695 	}
   9696 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9697 	sc->phy.release(sc);
   9698 
   9699 	return rv;
   9700 }
   9701 
   9702 /*
   9703  * wm_gmii_i82544_writereg:	[mii interface function]
   9704  *
   9705  *	Write a PHY register on the GMII.
   9706  */
   9707 static void
   9708 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9709 {
   9710 	struct wm_softc *sc = device_private(self);
   9711 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
   9716 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9717 	sc->phy.release(sc);
   9718 }
   9719 
   9720 /*
   9721  * wm_gmii_i80003_readreg:	[mii interface function]
   9722  *
 *	Read a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9726  */
   9727 static int
   9728 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9729 {
   9730 	struct wm_softc *sc = device_private(self);
   9731 	int rv;
   9732 
   9733 	if (phy != 1) /* only one PHY on kumeran bus */
   9734 		return 0;
   9735 
   9736 	if (sc->phy.acquire(sc)) {
   9737 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9738 		    __func__);
   9739 		return 0;
   9740 	}
   9741 
   9742 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9743 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9744 		    reg >> GG82563_PAGE_SHIFT);
   9745 	} else {
   9746 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9747 		    reg >> GG82563_PAGE_SHIFT);
   9748 	}
	/* Wait an extra 200us to work around the buggy MDIC ready bit */
   9750 	delay(200);
   9751 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9752 	delay(200);
   9753 	sc->phy.release(sc);
   9754 
   9755 	return rv;
   9756 }
   9757 
   9758 /*
   9759  * wm_gmii_i80003_writereg:	[mii interface function]
   9760  *
   9761  *	Write a PHY register on the kumeran.
   9762  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9764  */
   9765 static void
   9766 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9767 {
   9768 	struct wm_softc *sc = device_private(self);
   9769 
   9770 	if (phy != 1) /* only one PHY on kumeran bus */
   9771 		return;
   9772 
   9773 	if (sc->phy.acquire(sc)) {
   9774 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9775 		    __func__);
   9776 		return;
   9777 	}
   9778 
   9779 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9780 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9781 		    reg >> GG82563_PAGE_SHIFT);
   9782 	} else {
   9783 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9784 		    reg >> GG82563_PAGE_SHIFT);
   9785 	}
	/* Wait an extra 200us to work around the buggy MDIC ready bit */
   9787 	delay(200);
   9788 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9789 	delay(200);
   9790 
   9791 	sc->phy.release(sc);
   9792 }
   9793 
   9794 /*
   9795  * wm_gmii_bm_readreg:	[mii interface function]
   9796  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9800  */
   9801 static int
   9802 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9803 {
   9804 	struct wm_softc *sc = device_private(self);
   9805 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9806 	uint16_t val;
   9807 	int rv;
   9808 
   9809 	if (sc->phy.acquire(sc)) {
   9810 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9811 		    __func__);
   9812 		return 0;
   9813 	}
   9814 
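	/*
	 * Except on the 82574 and 82583, accesses to pages >= 768 and to
	 * registers 25 and 31 of page 0 must go to PHY address 1.
	 */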
   9815 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9816 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9817 		    || (reg == 31)) ? 1 : phy;
   9818 	/* Page 800 works differently than the rest so it has its own func */
   9819 	if (page == BM_WUC_PAGE) {
   9820 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9821 		rv = val;
   9822 		goto release;
   9823 	}
   9824 
   9825 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9826 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9827 		    && (sc->sc_type != WM_T_82583))
   9828 			wm_gmii_mdic_writereg(self, phy,
   9829 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9830 		else
   9831 			wm_gmii_mdic_writereg(self, phy,
   9832 			    BME1000_PHY_PAGE_SELECT, page);
   9833 	}
   9834 
   9835 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9836 
   9837 release:
   9838 	sc->phy.release(sc);
   9839 	return rv;
   9840 }
   9841 
   9842 /*
   9843  * wm_gmii_bm_writereg:	[mii interface function]
   9844  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9848  */
   9849 static void
   9850 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9851 {
   9852 	struct wm_softc *sc = device_private(self);
   9853 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9854 
   9855 	if (sc->phy.acquire(sc)) {
   9856 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9857 		    __func__);
   9858 		return;
   9859 	}
   9860 
   9861 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9862 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9863 		    || (reg == 31)) ? 1 : phy;
   9864 	/* Page 800 works differently than the rest so it has its own func */
   9865 	if (page == BM_WUC_PAGE) {
   9866 		uint16_t tmp;
   9867 
   9868 		tmp = val;
   9869 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9870 		goto release;
   9871 	}
   9872 
   9873 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9874 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9875 		    && (sc->sc_type != WM_T_82583))
   9876 			wm_gmii_mdic_writereg(self, phy,
   9877 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9878 		else
   9879 			wm_gmii_mdic_writereg(self, phy,
   9880 			    BME1000_PHY_PAGE_SELECT, page);
   9881 	}
   9882 
   9883 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9884 
   9885 release:
   9886 	sc->phy.release(sc);
   9887 }
   9888 
   9889 static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
   9891 {
   9892 	struct wm_softc *sc = device_private(self);
   9893 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9894 	uint16_t wuce, reg;
   9895 
   9896 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9897 		device_xname(sc->sc_dev), __func__));
   9898 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9899 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   9901 	}
   9902 
   9903 	/*
   9904 	 * 1) Enable PHY wakeup register first.
   9905 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9906 	 */
   9907 
   9908 	/* Set page 769 */
   9909 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9910 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9911 
   9912 	/* Read WUCE and save it */
   9913 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9914 
   9915 	reg = wuce | BM_WUC_ENABLE_BIT;
   9916 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9917 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9918 
   9919 	/* Select page 800 */
   9920 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9921 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9922 
   9923 	/*
   9924 	 * 2) Access PHY wakeup register.
   9925 	 * See e1000_access_phy_wakeup_reg_bm.
   9926 	 */
   9927 
   9928 	/* Write page 800 */
   9929 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9930 
   9931 	if (rd)
   9932 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9933 	else
   9934 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9935 
   9936 	/*
   9937 	 * 3) Disable PHY wakeup register.
   9938 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9939 	 */
   9940 	/* Set page 769 */
   9941 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9942 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9943 
   9944 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9945 }
   9946 
   9947 /*
   9948  * wm_gmii_hv_readreg:	[mii interface function]
   9949  *
 *	Read a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9953  */
   9954 static int
   9955 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9956 {
   9957 	struct wm_softc *sc = device_private(self);
   9958 	int rv;
   9959 
   9960 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9961 		device_xname(sc->sc_dev), __func__));
   9962 	if (sc->phy.acquire(sc)) {
   9963 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9964 		    __func__);
   9965 		return 0;
   9966 	}
   9967 
   9968 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9969 	sc->phy.release(sc);
   9970 	return rv;
   9971 }
   9972 
   9973 static int
   9974 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9975 {
   9976 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9977 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9978 	uint16_t val;
   9979 	int rv;
   9980 
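	/* Registers on the in-band pages (>= 768) live at PHY address 1. */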
   9981 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9982 
   9983 	/* Page 800 works differently than the rest so it has its own func */
   9984 	if (page == BM_WUC_PAGE) {
   9985 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9986 		return val;
   9987 	}
   9988 
	/*
	 * Pages below 768 (other than page 0) work differently than the
	 * rest; they are not handled yet.
	 */
   9993 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9994 		printf("gmii_hv_readreg!!!\n");
   9995 		return 0;
   9996 	}
   9997 
   9998 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9999 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   10000 		    page << BME1000_PAGE_SHIFT);
   10001 	}
   10002 
   10003 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   10004 	return rv;
   10005 }
   10006 
   10007 /*
   10008  * wm_gmii_hv_writereg:	[mii interface function]
   10009  *
 *	Write a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10013  */
   10014 static void
   10015 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   10016 {
   10017 	struct wm_softc *sc = device_private(self);
   10018 
   10019 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10020 		device_xname(sc->sc_dev), __func__));
   10021 
   10022 	if (sc->phy.acquire(sc)) {
   10023 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10024 		    __func__);
   10025 		return;
   10026 	}
   10027 
   10028 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   10029 	sc->phy.release(sc);
   10030 }
   10031 
   10032 static void
   10033 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   10034 {
   10035 	struct wm_softc *sc = device_private(self);
   10036 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10037 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10038 
   10039 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10040 
   10041 	/* Page 800 works differently than the rest so it has its own func */
   10042 	if (page == BM_WUC_PAGE) {
   10043 		uint16_t tmp;
   10044 
   10045 		tmp = val;
   10046 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   10047 		return;
   10048 	}
   10049 
	/*
	 * Pages below 768 (other than page 0) work differently than the
	 * rest; they are not handled yet.
	 */
   10054 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10055 		printf("gmii_hv_writereg!!!\n");
   10056 		return;
   10057 	}
   10058 
   10059 	{
   10060 		/*
   10061 		 * XXX Workaround MDIO accesses being disabled after entering
   10062 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10063 		 * register is set)
   10064 		 */
   10065 		if (sc->sc_phytype == WMPHY_82578) {
   10066 			struct mii_softc *child;
   10067 
   10068 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10069 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10070 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10071 			    && ((val & (1 << 11)) != 0)) {
   10072 				printf("XXX need workaround\n");
   10073 			}
   10074 		}
   10075 
   10076 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10077 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   10078 			    page << BME1000_PAGE_SHIFT);
   10079 		}
   10080 	}
   10081 
   10082 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   10083 }
   10084 
   10085 /*
   10086  * wm_gmii_82580_readreg:	[mii interface function]
   10087  *
   10088  *	Read a PHY register on the 82580 and I350.
   10089  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10091  */
   10092 static int
   10093 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   10094 {
   10095 	struct wm_softc *sc = device_private(self);
   10096 	int rv;
   10097 
   10098 	if (sc->phy.acquire(sc) != 0) {
   10099 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10100 		    __func__);
   10101 		return 0;
   10102 	}
   10103 
   10104 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   10105 
   10106 	sc->phy.release(sc);
   10107 	return rv;
   10108 }
   10109 
   10110 /*
   10111  * wm_gmii_82580_writereg:	[mii interface function]
   10112  *
   10113  *	Write a PHY register on the 82580 and I350.
   10114  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10116  */
   10117 static void
   10118 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   10119 {
   10120 	struct wm_softc *sc = device_private(self);
   10121 
   10122 	if (sc->phy.acquire(sc) != 0) {
   10123 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10124 		    __func__);
   10125 		return;
   10126 	}
   10127 
   10128 	wm_gmii_mdic_writereg(self, phy, reg, val);
   10129 
   10130 	sc->phy.release(sc);
   10131 }
   10132 
   10133 /*
   10134  * wm_gmii_gs40g_readreg:	[mii interface function]
   10135  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10139  */
   10140 static int
   10141 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   10142 {
   10143 	struct wm_softc *sc = device_private(self);
   10144 	int page, offset;
   10145 	int rv;
   10146 
   10147 	/* Acquire semaphore */
   10148 	if (sc->phy.acquire(sc)) {
   10149 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10150 		    __func__);
   10151 		return 0;
   10152 	}
   10153 
   10154 	/* Page select */
   10155 	page = reg >> GS40G_PAGE_SHIFT;
   10156 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10157 
   10158 	/* Read reg */
   10159 	offset = reg & GS40G_OFFSET_MASK;
   10160 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10161 
   10162 	sc->phy.release(sc);
   10163 	return rv;
   10164 }
   10165 
   10166 /*
   10167  * wm_gmii_gs40g_writereg:	[mii interface function]
   10168  *
   10169  *	Write a PHY register on the I210 and I211.
   10170  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10172  */
   10173 static void
   10174 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10175 {
   10176 	struct wm_softc *sc = device_private(self);
   10177 	int page, offset;
   10178 
   10179 	/* Acquire semaphore */
   10180 	if (sc->phy.acquire(sc)) {
   10181 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10182 		    __func__);
   10183 		return;
   10184 	}
   10185 
   10186 	/* Page select */
   10187 	page = reg >> GS40G_PAGE_SHIFT;
   10188 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10189 
   10190 	/* Write reg */
   10191 	offset = reg & GS40G_OFFSET_MASK;
   10192 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10193 
   10194 	/* Release semaphore */
   10195 	sc->phy.release(sc);
   10196 }
   10197 
   10198 /*
   10199  * wm_gmii_statchg:	[mii interface function]
   10200  *
   10201  *	Callback from MII layer when media changes.
   10202  */
   10203 static void
   10204 wm_gmii_statchg(struct ifnet *ifp)
   10205 {
   10206 	struct wm_softc *sc = ifp->if_softc;
   10207 	struct mii_data *mii = &sc->sc_mii;
   10208 
   10209 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10210 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10211 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10212 
   10213 	/*
   10214 	 * Get flow control negotiation result.
   10215 	 */
   10216 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10217 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10218 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10219 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10220 	}
   10221 
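	/* Translate the negotiated pause flags into MAC flow control bits. */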
   10222 	if (sc->sc_flowflags & IFM_FLOW) {
   10223 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10224 			sc->sc_ctrl |= CTRL_TFCE;
   10225 			sc->sc_fcrtl |= FCRTL_XONE;
   10226 		}
   10227 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10228 			sc->sc_ctrl |= CTRL_RFCE;
   10229 	}
   10230 
   10231 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10232 		DPRINTF(WM_DEBUG_LINK,
   10233 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10234 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10235 	} else {
   10236 		DPRINTF(WM_DEBUG_LINK,
   10237 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10238 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10239 	}
   10240 
   10241 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10242 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10243 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10244 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10245 	if (sc->sc_type == WM_T_80003) {
   10246 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10247 		case IFM_1000_T:
   10248 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10249 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10251 			break;
   10252 		default:
   10253 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10254 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10256 			break;
   10257 		}
   10258 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10259 	}
   10260 }
   10261 
   10262 /* kumeran related (80003, ICH* and PCH*) */
   10263 
   10264 /*
   10265  * wm_kmrn_readreg:
   10266  *
   10267  *	Read a kumeran register
   10268  */
   10269 static int
   10270 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10271 {
   10272 	int rv;
   10273 
   10274 	if (sc->sc_type == WM_T_80003)
   10275 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10276 	else
   10277 		rv = sc->phy.acquire(sc);
   10278 	if (rv != 0) {
   10279 		aprint_error_dev(sc->sc_dev,
   10280 		    "%s: failed to get semaphore\n", __func__);
   10281 		return 0;
   10282 	}
   10283 
   10284 	rv = wm_kmrn_readreg_locked(sc, reg);
   10285 
   10286 	if (sc->sc_type == WM_T_80003)
   10287 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10288 	else
   10289 		sc->phy.release(sc);
   10290 
   10291 	return rv;
   10292 }
   10293 
   10294 static int
   10295 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10296 {
   10297 	int rv;
   10298 
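	/*
	 * A kumeran read is a single KUMCTRLSTA write: the register
	 * offset goes in the OFFSET field, REN requests the read, and
	 * the result appears in the low 16 bits about 2us later.
	 */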
   10299 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10300 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10301 	    KUMCTRLSTA_REN);
   10302 	CSR_WRITE_FLUSH(sc);
   10303 	delay(2);
   10304 
   10305 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10306 
   10307 	return rv;
   10308 }
   10309 
   10310 /*
   10311  * wm_kmrn_writereg:
   10312  *
   10313  *	Write a kumeran register
   10314  */
   10315 static void
   10316 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10317 {
   10318 	int rv;
   10319 
   10320 	if (sc->sc_type == WM_T_80003)
   10321 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10322 	else
   10323 		rv = sc->phy.acquire(sc);
   10324 	if (rv != 0) {
   10325 		aprint_error_dev(sc->sc_dev,
   10326 		    "%s: failed to get semaphore\n", __func__);
   10327 		return;
   10328 	}
   10329 
   10330 	wm_kmrn_writereg_locked(sc, reg, val);
   10331 
   10332 	if (sc->sc_type == WM_T_80003)
   10333 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10334 	else
   10335 		sc->phy.release(sc);
   10336 }
   10337 
   10338 static void
   10339 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10340 {
   10341 
   10342 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10343 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10344 	    (val & KUMCTRLSTA_MASK));
   10345 }
   10346 
   10347 /* SGMII related */
   10348 
   10349 /*
   10350  * wm_sgmii_uses_mdio
   10351  *
   10352  * Check whether the transaction is to the internal PHY or the external
   10353  * MDIO interface. Return true if it's MDIO.
   10354  */
   10355 static bool
   10356 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10357 {
   10358 	uint32_t reg;
   10359 	bool ismdio = false;
   10360 
   10361 	switch (sc->sc_type) {
   10362 	case WM_T_82575:
   10363 	case WM_T_82576:
   10364 		reg = CSR_READ(sc, WMREG_MDIC);
   10365 		ismdio = ((reg & MDIC_DEST) != 0);
   10366 		break;
   10367 	case WM_T_82580:
   10368 	case WM_T_I350:
   10369 	case WM_T_I354:
   10370 	case WM_T_I210:
   10371 	case WM_T_I211:
   10372 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10373 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10374 		break;
   10375 	default:
   10376 		break;
   10377 	}
   10378 
   10379 	return ismdio;
   10380 }
   10381 
   10382 /*
   10383  * wm_sgmii_readreg:	[mii interface function]
   10384  *
 *	Read a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10388  */
   10389 static int
   10390 wm_sgmii_readreg(device_t self, int phy, int reg)
   10391 {
   10392 	struct wm_softc *sc = device_private(self);
   10393 	uint32_t i2ccmd;
   10394 	int i, rv;
   10395 
   10396 	if (sc->phy.acquire(sc)) {
   10397 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10398 		    __func__);
   10399 		return 0;
   10400 	}
   10401 
   10402 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10403 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10404 	    | I2CCMD_OPCODE_READ;
   10405 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10406 
   10407 	/* Poll the ready bit */
   10408 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10409 		delay(50);
   10410 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10411 		if (i2ccmd & I2CCMD_READY)
   10412 			break;
   10413 	}
   10414 	if ((i2ccmd & I2CCMD_READY) == 0)
   10415 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10416 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10417 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10418 
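	/* The data bytes arrive byte-swapped over I2C; swap them back. */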
   10419 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10420 
   10421 	sc->phy.release(sc);
   10422 	return rv;
   10423 }
   10424 
   10425 /*
   10426  * wm_sgmii_writereg:	[mii interface function]
   10427  *
   10428  *	Write a PHY register on the SGMII.
   10429  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10431  */
   10432 static void
   10433 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10434 {
   10435 	struct wm_softc *sc = device_private(self);
   10436 	uint32_t i2ccmd;
   10437 	int i;
   10438 	int val_swapped;
   10439 
   10440 	if (sc->phy.acquire(sc) != 0) {
   10441 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10442 		    __func__);
   10443 		return;
   10444 	}
   10445 	/* Swap the data bytes for the I2C interface */
   10446 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10447 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10448 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10449 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10450 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10451 
   10452 	/* Poll the ready bit */
   10453 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10454 		delay(50);
   10455 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10456 		if (i2ccmd & I2CCMD_READY)
   10457 			break;
   10458 	}
   10459 	if ((i2ccmd & I2CCMD_READY) == 0)
   10460 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10461 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10462 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10463 
   10464 	sc->phy.release(sc);
   10465 }
   10466 
   10467 /* TBI related */
   10468 
   10469 /*
   10470  * wm_tbi_mediainit:
   10471  *
   10472  *	Initialize media for use on 1000BASE-X devices.
   10473  */
   10474 static void
   10475 wm_tbi_mediainit(struct wm_softc *sc)
   10476 {
   10477 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10478 	const char *sep = "";
   10479 
   10480 	if (sc->sc_type < WM_T_82543)
   10481 		sc->sc_tipg = TIPG_WM_DFLT;
   10482 	else
   10483 		sc->sc_tipg = TIPG_LG_DFLT;
   10484 
   10485 	sc->sc_tbi_serdes_anegticks = 5;
   10486 
   10487 	/* Initialize our media structures */
   10488 	sc->sc_mii.mii_ifp = ifp;
   10489 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10490 
   10491 	if ((sc->sc_type >= WM_T_82575)
   10492 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10493 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10494 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10495 	else
   10496 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10497 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10498 
   10499 	/*
   10500 	 * SWD Pins:
   10501 	 *
   10502 	 *	0 = Link LED (output)
   10503 	 *	1 = Loss Of Signal (input)
   10504 	 */
   10505 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10506 
   10507 	/* XXX Perhaps this is only for TBI */
   10508 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10509 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10510 
   10511 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10512 		sc->sc_ctrl &= ~CTRL_LRST;
   10513 
   10514 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10515 
   10516 #define	ADD(ss, mm, dd)							\
   10517 do {									\
   10518 	aprint_normal("%s%s", sep, ss);					\
   10519 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10520 	sep = ", ";							\
   10521 } while (/*CONSTCOND*/0)
   10522 
   10523 	aprint_normal_dev(sc->sc_dev, "");
   10524 
   10525 	if (sc->sc_type == WM_T_I354) {
   10526 		uint32_t status;
   10527 
   10528 		status = CSR_READ(sc, WMREG_STATUS);
   10529 		if (((status & STATUS_2P5_SKU) != 0)
   10530 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   10532 		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10534 	} else if (sc->sc_type == WM_T_82545) {
   10535 		/* Only 82545 is LX (XXX except SFP) */
   10536 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10537 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10538 	} else {
   10539 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10540 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10541 	}
   10542 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10543 	aprint_normal("\n");
   10544 
   10545 #undef ADD
   10546 
   10547 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10548 }
   10549 
   10550 /*
   10551  * wm_tbi_mediachange:	[ifmedia interface function]
   10552  *
   10553  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10554  */
   10555 static int
   10556 wm_tbi_mediachange(struct ifnet *ifp)
   10557 {
   10558 	struct wm_softc *sc = ifp->if_softc;
   10559 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10560 	uint32_t status;
   10561 	int i;
   10562 
   10563 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10564 		/* XXX need some work for >= 82571 and < 82575 */
   10565 		if (sc->sc_type < WM_T_82575)
   10566 			return 0;
   10567 	}
   10568 
   10569 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10570 	    || (sc->sc_type >= WM_T_82575))
   10571 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10572 
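	/*
	 * Build the new TXCW: always advertise autonegotiation (ANE),
	 * plus the duplex bits that match the selected media.
	 */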
   10573 	sc->sc_ctrl &= ~CTRL_LRST;
   10574 	sc->sc_txcw = TXCW_ANE;
   10575 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10576 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10577 	else if (ife->ifm_media & IFM_FDX)
   10578 		sc->sc_txcw |= TXCW_FD;
   10579 	else
   10580 		sc->sc_txcw |= TXCW_HD;
   10581 
   10582 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10583 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10584 
   10585 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10586 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10587 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10588 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10589 	CSR_WRITE_FLUSH(sc);
   10590 	delay(1000);
   10591 
   10592 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10593 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10594 
   10595 	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal, and clear if they don't.
   10598 	 */
   10599 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10600 		/* Have signal; wait for the link to come up. */
   10601 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10602 			delay(10000);
   10603 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10604 				break;
   10605 		}
   10606 
   10607 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10608 			    device_xname(sc->sc_dev),i));
   10609 
   10610 		status = CSR_READ(sc, WMREG_STATUS);
   10611 		DPRINTF(WM_DEBUG_LINK,
   10612 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10613 			device_xname(sc->sc_dev),status, STATUS_LU));
   10614 		if (status & STATUS_LU) {
   10615 			/* Link is up. */
   10616 			DPRINTF(WM_DEBUG_LINK,
   10617 			    ("%s: LINK: set media -> link up %s\n",
   10618 			    device_xname(sc->sc_dev),
   10619 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10620 
   10621 			/*
   10622 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10623 			 * so we should update sc->sc_ctrl
   10624 			 */
   10625 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10626 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10627 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10628 			if (status & STATUS_FD)
   10629 				sc->sc_tctl |=
   10630 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10631 			else
   10632 				sc->sc_tctl |=
   10633 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10634 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10635 				sc->sc_fcrtl |= FCRTL_XONE;
   10636 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10637 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10638 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10639 				      sc->sc_fcrtl);
   10640 			sc->sc_tbi_linkup = 1;
   10641 		} else {
   10642 			if (i == WM_LINKUP_TIMEOUT)
   10643 				wm_check_for_link(sc);
   10644 			/* Link is down. */
   10645 			DPRINTF(WM_DEBUG_LINK,
   10646 			    ("%s: LINK: set media -> link down\n",
   10647 			    device_xname(sc->sc_dev)));
   10648 			sc->sc_tbi_linkup = 0;
   10649 		}
   10650 	} else {
   10651 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10652 		    device_xname(sc->sc_dev)));
   10653 		sc->sc_tbi_linkup = 0;
   10654 	}
   10655 
   10656 	wm_tbi_serdes_set_linkled(sc);
   10657 
   10658 	return 0;
   10659 }
   10660 
   10661 /*
   10662  * wm_tbi_mediastatus:	[ifmedia interface function]
   10663  *
   10664  *	Get the current interface media status on a 1000BASE-X device.
   10665  */
   10666 static void
   10667 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10668 {
   10669 	struct wm_softc *sc = ifp->if_softc;
   10670 	uint32_t ctrl, status;
   10671 
   10672 	ifmr->ifm_status = IFM_AVALID;
   10673 	ifmr->ifm_active = IFM_ETHER;
   10674 
   10675 	status = CSR_READ(sc, WMREG_STATUS);
   10676 	if ((status & STATUS_LU) == 0) {
   10677 		ifmr->ifm_active |= IFM_NONE;
   10678 		return;
   10679 	}
   10680 
   10681 	ifmr->ifm_status |= IFM_ACTIVE;
   10682 	/* Only 82545 is LX */
   10683 	if (sc->sc_type == WM_T_82545)
   10684 		ifmr->ifm_active |= IFM_1000_LX;
   10685 	else
   10686 		ifmr->ifm_active |= IFM_1000_SX;
   10687 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10688 		ifmr->ifm_active |= IFM_FDX;
   10689 	else
   10690 		ifmr->ifm_active |= IFM_HDX;
   10691 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10692 	if (ctrl & CTRL_RFCE)
   10693 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10694 	if (ctrl & CTRL_TFCE)
   10695 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10696 }
   10697 
   10698 /* XXX TBI only */
   10699 static int
   10700 wm_check_for_link(struct wm_softc *sc)
   10701 {
   10702 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10703 	uint32_t rxcw;
   10704 	uint32_t ctrl;
   10705 	uint32_t status;
   10706 	uint32_t sig;
   10707 
   10708 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10709 		/* XXX need some work for >= 82571 */
   10710 		if (sc->sc_type >= WM_T_82571) {
   10711 			sc->sc_tbi_linkup = 1;
   10712 			return 0;
   10713 		}
   10714 	}
   10715 
   10716 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10717 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10718 	status = CSR_READ(sc, WMREG_STATUS);
   10719 
   10720 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10721 
   10722 	DPRINTF(WM_DEBUG_LINK,
   10723 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10724 		device_xname(sc->sc_dev), __func__,
   10725 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10726 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10727 
   10728 	/*
   10729 	 * SWDPIN   LU RXCW
   10730 	 *      0    0    0
   10731 	 *      0    0    1	(should not happen)
   10732 	 *      0    1    0	(should not happen)
   10733 	 *      0    1    1	(should not happen)
   10734 	 *      1    0    0	Disable autonego and force linkup
   10735 	 *      1    0    1	got /C/ but not linkup yet
   10736 	 *      1    1    0	(linkup)
   10737 	 *      1    1    1	If IFM_AUTO, back to autonego
   10738 	 *
   10739 	 */
   10740 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10741 	    && ((status & STATUS_LU) == 0)
   10742 	    && ((rxcw & RXCW_C) == 0)) {
   10743 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10744 			__func__));
   10745 		sc->sc_tbi_linkup = 0;
   10746 		/* Disable auto-negotiation in the TXCW register */
   10747 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10748 
   10749 		/*
   10750 		 * Force link-up and also force full-duplex.
   10751 		 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so we should keep sc->sc_ctrl in sync.
   10754 		 */
   10755 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10756 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10757 	} else if (((status & STATUS_LU) != 0)
   10758 	    && ((rxcw & RXCW_C) != 0)
   10759 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10760 		sc->sc_tbi_linkup = 1;
   10761 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10762 			__func__));
   10763 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10764 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10765 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10766 	    && ((rxcw & RXCW_C) != 0)) {
   10767 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10768 	} else {
   10769 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10770 			status));
   10771 	}
   10772 
   10773 	return 0;
   10774 }
   10775 
   10776 /*
   10777  * wm_tbi_tick:
   10778  *
   10779  *	Check the link on TBI devices.
   10780  *	This function acts as mii_tick().
   10781  */
   10782 static void
   10783 wm_tbi_tick(struct wm_softc *sc)
   10784 {
   10785 	struct mii_data *mii = &sc->sc_mii;
   10786 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10787 	uint32_t status;
   10788 
   10789 	KASSERT(WM_CORE_LOCKED(sc));
   10790 
   10791 	status = CSR_READ(sc, WMREG_STATUS);
   10792 
   10793 	/* XXX is this needed? */
   10794 	(void)CSR_READ(sc, WMREG_RXCW);
   10795 	(void)CSR_READ(sc, WMREG_CTRL);
   10796 
   10797 	/* set link status */
   10798 	if ((status & STATUS_LU) == 0) {
   10799 		DPRINTF(WM_DEBUG_LINK,
   10800 		    ("%s: LINK: checklink -> down\n",
   10801 			device_xname(sc->sc_dev)));
   10802 		sc->sc_tbi_linkup = 0;
   10803 	} else if (sc->sc_tbi_linkup == 0) {
   10804 		DPRINTF(WM_DEBUG_LINK,
   10805 		    ("%s: LINK: checklink -> up %s\n",
   10806 			device_xname(sc->sc_dev),
   10807 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10808 		sc->sc_tbi_linkup = 1;
   10809 		sc->sc_tbi_serdes_ticks = 0;
   10810 	}
   10811 
   10812 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10813 		goto setled;
   10814 
   10815 	if ((status & STATUS_LU) == 0) {
   10816 		sc->sc_tbi_linkup = 0;
   10817 		/* If the timer expired, retry autonegotiation */
   10818 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10819 		    && (++sc->sc_tbi_serdes_ticks
   10820 			>= sc->sc_tbi_serdes_anegticks)) {
   10821 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10822 			sc->sc_tbi_serdes_ticks = 0;
   10823 			/*
   10824 			 * Reset the link, and let autonegotiation do
   10825 			 * its thing
   10826 			 */
   10827 			sc->sc_ctrl |= CTRL_LRST;
   10828 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10829 			CSR_WRITE_FLUSH(sc);
   10830 			delay(1000);
   10831 			sc->sc_ctrl &= ~CTRL_LRST;
   10832 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10833 			CSR_WRITE_FLUSH(sc);
   10834 			delay(1000);
   10835 			CSR_WRITE(sc, WMREG_TXCW,
   10836 			    sc->sc_txcw & ~TXCW_ANE);
   10837 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10838 		}
   10839 	}
   10840 
   10841 setled:
   10842 	wm_tbi_serdes_set_linkled(sc);
   10843 }
   10844 
   10845 /* SERDES related */
   10846 static void
   10847 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10848 {
   10849 	uint32_t reg;
   10850 
   10851 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10852 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10853 		return;
   10854 
   10855 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10856 	reg |= PCS_CFG_PCS_EN;
   10857 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10858 
   10859 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10860 	reg &= ~CTRL_EXT_SWDPIN(3);
   10861 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10862 	CSR_WRITE_FLUSH(sc);
   10863 }
   10864 
   10865 static int
   10866 wm_serdes_mediachange(struct ifnet *ifp)
   10867 {
   10868 	struct wm_softc *sc = ifp->if_softc;
   10869 	bool pcs_autoneg = true; /* XXX */
   10870 	uint32_t ctrl_ext, pcs_lctl, reg;
   10871 
   10872 	/* XXX Currently, this function is not called on 8257[12] */
   10873 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10874 	    || (sc->sc_type >= WM_T_82575))
   10875 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10876 
   10877 	wm_serdes_power_up_link_82575(sc);
   10878 
   10879 	sc->sc_ctrl |= CTRL_SLU;
   10880 
   10881 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10882 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10883 
   10884 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10885 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10886 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10887 	case CTRL_EXT_LINK_MODE_SGMII:
   10888 		pcs_autoneg = true;
   10889 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10890 		break;
   10891 	case CTRL_EXT_LINK_MODE_1000KX:
   10892 		pcs_autoneg = false;
   10893 		/* FALLTHROUGH */
   10894 	default:
   10895 		if ((sc->sc_type == WM_T_82575)
   10896 		    || (sc->sc_type == WM_T_82576)) {
   10897 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10898 				pcs_autoneg = false;
   10899 		}
   10900 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10901 		    | CTRL_FRCFDX;
   10902 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10903 	}
   10904 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10905 
   10906 	if (pcs_autoneg) {
   10907 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10908 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10909 
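		/* Advertise both symmetric and asymmetric pause. */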
   10910 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10911 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10912 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10913 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10914 	} else
   10915 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10916 
   10917 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10918 
   10920 	return 0;
   10921 }
   10922 
   10923 static void
   10924 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10925 {
   10926 	struct wm_softc *sc = ifp->if_softc;
   10927 	struct mii_data *mii = &sc->sc_mii;
   10928 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10929 	uint32_t pcs_adv, pcs_lpab, reg;
   10930 
   10931 	ifmr->ifm_status = IFM_AVALID;
   10932 	ifmr->ifm_active = IFM_ETHER;
   10933 
   10934 	/* Check PCS */
   10935 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10936 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10937 		ifmr->ifm_active |= IFM_NONE;
   10938 		sc->sc_tbi_linkup = 0;
   10939 		goto setled;
   10940 	}
   10941 
   10942 	sc->sc_tbi_linkup = 1;
   10943 	ifmr->ifm_status |= IFM_ACTIVE;
   10944 	if (sc->sc_type == WM_T_I354) {
   10945 		uint32_t status;
   10946 
   10947 		status = CSR_READ(sc, WMREG_STATUS);
   10948 		if (((status & STATUS_2P5_SKU) != 0)
   10949 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10950 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10951 		} else
   10952 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10953 	} else {
   10954 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10955 		case PCS_LSTS_SPEED_10:
   10956 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10957 			break;
   10958 		case PCS_LSTS_SPEED_100:
   10959 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10960 			break;
   10961 		case PCS_LSTS_SPEED_1000:
   10962 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10963 			break;
   10964 		default:
   10965 			device_printf(sc->sc_dev, "Unknown speed\n");
   10966 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10967 			break;
   10968 		}
   10969 	}
   10970 	if ((reg & PCS_LSTS_FDX) != 0)
   10971 		ifmr->ifm_active |= IFM_FDX;
   10972 	else
   10973 		ifmr->ifm_active |= IFM_HDX;
   10974 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10975 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10976 		/* Check flow */
   10977 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10978 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10979 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10980 			goto setled;
   10981 		}
   10982 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10983 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10984 		DPRINTF(WM_DEBUG_LINK,
   10985 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
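		/*
		 * Resolve the pause mode from our advertisement and the
		 * link partner's ability, following the priority
		 * resolution rules of IEEE 802.3 Annex 28B.
		 */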
   10986 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10987 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10988 			mii->mii_media_active |= IFM_FLOW
   10989 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10990 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10991 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10992 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10993 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10994 			mii->mii_media_active |= IFM_FLOW
   10995 			    | IFM_ETH_TXPAUSE;
   10996 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10997 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10998 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10999 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11000 			mii->mii_media_active |= IFM_FLOW
   11001 			    | IFM_ETH_RXPAUSE;
   11002 		}
   11003 	}
   11004 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11005 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11006 setled:
   11007 	wm_tbi_serdes_set_linkled(sc);
   11008 }
   11009 
   11010 /*
   11011  * wm_serdes_tick:
   11012  *
   11013  *	Check the link on serdes devices.
   11014  */
   11015 static void
   11016 wm_serdes_tick(struct wm_softc *sc)
   11017 {
   11018 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11019 	struct mii_data *mii = &sc->sc_mii;
   11020 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11021 	uint32_t reg;
   11022 
   11023 	KASSERT(WM_CORE_LOCKED(sc));
   11024 
   11025 	mii->mii_media_status = IFM_AVALID;
   11026 	mii->mii_media_active = IFM_ETHER;
   11027 
   11028 	/* Check PCS */
   11029 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11030 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11031 		mii->mii_media_status |= IFM_ACTIVE;
   11032 		sc->sc_tbi_linkup = 1;
   11033 		sc->sc_tbi_serdes_ticks = 0;
   11034 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11035 		if ((reg & PCS_LSTS_FDX) != 0)
   11036 			mii->mii_media_active |= IFM_FDX;
   11037 		else
   11038 			mii->mii_media_active |= IFM_HDX;
   11039 	} else {
   11040 		mii->mii_media_status |= IFM_NONE;
   11041 		sc->sc_tbi_linkup = 0;
   11042 		/* If the timer expired, retry autonegotiation */
   11043 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11044 		    && (++sc->sc_tbi_serdes_ticks
   11045 			>= sc->sc_tbi_serdes_anegticks)) {
   11046 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11047 			sc->sc_tbi_serdes_ticks = 0;
   11048 			/* XXX */
   11049 			wm_serdes_mediachange(ifp);
   11050 		}
   11051 	}
   11052 
   11053 	wm_tbi_serdes_set_linkled(sc);
   11054 }
   11055 
   11056 /* SFP related */
   11057 
   11058 static int
   11059 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11060 {
   11061 	uint32_t i2ccmd;
   11062 	int i;
   11063 
   11064 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11065 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11066 
   11067 	/* Poll the ready bit */
   11068 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11069 		delay(50);
   11070 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11071 		if (i2ccmd & I2CCMD_READY)
   11072 			break;
   11073 	}
   11074 	if ((i2ccmd & I2CCMD_READY) == 0)
   11075 		return -1;
   11076 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11077 		return -1;
   11078 
   11079 	*data = i2ccmd & 0x00ff;
   11080 
   11081 	return 0;
   11082 }
   11083 
   11084 static uint32_t
   11085 wm_sfp_get_media_type(struct wm_softc *sc)
   11086 {
   11087 	uint32_t ctrl_ext;
   11088 	uint8_t val = 0;
   11089 	int timeout = 3;
   11090 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11091 	int rv = -1;
   11092 
   11093 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11094 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11095 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11096 	CSR_WRITE_FLUSH(sc);
   11097 
   11098 	/* Read SFP module data */
   11099 	while (timeout) {
   11100 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11101 		if (rv == 0)
   11102 			break;
   11103 		delay(100*1000); /* XXX too big */
   11104 		timeout--;
   11105 	}
   11106 	if (rv != 0)
   11107 		goto out;
   11108 	switch (val) {
   11109 	case SFF_SFP_ID_SFF:
   11110 		aprint_normal_dev(sc->sc_dev,
   11111 		    "Module/Connector soldered to board\n");
   11112 		break;
   11113 	case SFF_SFP_ID_SFP:
   11114 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11115 		break;
   11116 	case SFF_SFP_ID_UNKNOWN:
   11117 		goto out;
   11118 	default:
   11119 		break;
   11120 	}
   11121 
   11122 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11123 	if (rv != 0) {
   11124 		goto out;
   11125 	}
   11126 
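	/*
	 * Map the SFF Ethernet compliance flags to a driver media type:
	 * 1000BASE-SX/LX modules run over SerDes, while 1000BASE-T and
	 * 100BASE-FX modules are driven through SGMII.
	 */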
   11127 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11128 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11130 		sc->sc_flags |= WM_F_SGMII;
   11131 		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11133 		sc->sc_flags |= WM_F_SGMII;
   11134 		mediatype = WM_MEDIATYPE_SERDES;
   11135 	}
   11136 
   11137 out:
   11138 	/* Restore I2C interface setting */
   11139 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11140 
   11141 	return mediatype;
   11142 }
   11143 
   11144 /*
   11145  * NVM related.
   11146  * Microwire, SPI (w/wo EERD) and Flash.
   11147  */
   11148 
   11149 /* Both spi and uwire */
   11150 
   11151 /*
   11152  * wm_eeprom_sendbits:
   11153  *
   11154  *	Send a series of bits to the EEPROM.
   11155  */
   11156 static void
   11157 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11158 {
   11159 	uint32_t reg;
   11160 	int x;
   11161 
   11162 	reg = CSR_READ(sc, WMREG_EECD);
   11163 
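	/*
	 * Shift the bits out MSB first on EECD_DI, clocking each one
	 * with a low/high/low pulse on EECD_SK, 2us per phase.
	 */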
   11164 	for (x = nbits; x > 0; x--) {
   11165 		if (bits & (1U << (x - 1)))
   11166 			reg |= EECD_DI;
   11167 		else
   11168 			reg &= ~EECD_DI;
   11169 		CSR_WRITE(sc, WMREG_EECD, reg);
   11170 		CSR_WRITE_FLUSH(sc);
   11171 		delay(2);
   11172 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11173 		CSR_WRITE_FLUSH(sc);
   11174 		delay(2);
   11175 		CSR_WRITE(sc, WMREG_EECD, reg);
   11176 		CSR_WRITE_FLUSH(sc);
   11177 		delay(2);
   11178 	}
   11179 }
   11180 
   11181 /*
   11182  * wm_eeprom_recvbits:
   11183  *
   11184  *	Receive a series of bits from the EEPROM.
   11185  */
   11186 static void
   11187 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11188 {
   11189 	uint32_t reg, val;
   11190 	int x;
   11191 
   11192 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11193 
   11194 	val = 0;
   11195 	for (x = nbits; x > 0; x--) {
   11196 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11197 		CSR_WRITE_FLUSH(sc);
   11198 		delay(2);
   11199 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11200 			val |= (1U << (x - 1));
   11201 		CSR_WRITE(sc, WMREG_EECD, reg);
   11202 		CSR_WRITE_FLUSH(sc);
   11203 		delay(2);
   11204 	}
   11205 	*valp = val;
   11206 }
   11207 
   11208 /* Microwire */
   11209 
   11210 /*
   11211  * wm_nvm_read_uwire:
   11212  *
   11213  *	Read a word from the EEPROM using the MicroWire protocol.
   11214  */
   11215 static int
   11216 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11217 {
   11218 	uint32_t reg, val;
   11219 	int i;
   11220 
   11221 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11222 		device_xname(sc->sc_dev), __func__));
   11223 
   11224 	for (i = 0; i < wordcnt; i++) {
   11225 		/* Clear SK and DI. */
   11226 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11227 		CSR_WRITE(sc, WMREG_EECD, reg);
   11228 
   11229 		/*
   11230 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11231 		 * and Xen.
   11232 		 *
   11233 		 * We use this workaround only for 82540 because qemu's
   11234 		 * e1000 act as 82540.
   11235 		 */
   11236 		if (sc->sc_type == WM_T_82540) {
   11237 			reg |= EECD_SK;
   11238 			CSR_WRITE(sc, WMREG_EECD, reg);
   11239 			reg &= ~EECD_SK;
   11240 			CSR_WRITE(sc, WMREG_EECD, reg);
   11241 			CSR_WRITE_FLUSH(sc);
   11242 			delay(2);
   11243 		}
   11244 		/* XXX: end of workaround */
   11245 
   11246 		/* Set CHIP SELECT. */
   11247 		reg |= EECD_CS;
   11248 		CSR_WRITE(sc, WMREG_EECD, reg);
   11249 		CSR_WRITE_FLUSH(sc);
   11250 		delay(2);
   11251 
   11252 		/* Shift in the READ command. */
   11253 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11254 
   11255 		/* Shift in address. */
   11256 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11257 
   11258 		/* Shift out the data. */
   11259 		wm_eeprom_recvbits(sc, &val, 16);
   11260 		data[i] = val & 0xffff;
   11261 
   11262 		/* Clear CHIP SELECT. */
   11263 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11264 		CSR_WRITE(sc, WMREG_EECD, reg);
   11265 		CSR_WRITE_FLUSH(sc);
   11266 		delay(2);
   11267 	}
   11268 
   11269 	return 0;
   11270 }
   11271 
   11272 /* SPI */
   11273 
   11274 /*
   11275  * Set SPI and FLASH related information from the EECD register.
   11276  * For 82541 and 82547, the word size is taken from EEPROM.
   11277  */
   11278 static int
   11279 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11280 {
   11281 	int size;
   11282 	uint32_t reg;
   11283 	uint16_t data;
   11284 
   11285 	reg = CSR_READ(sc, WMREG_EECD);
   11286 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11287 
   11288 	/* Read the size of NVM from EECD by default */
   11289 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11290 	switch (sc->sc_type) {
   11291 	case WM_T_82541:
   11292 	case WM_T_82541_2:
   11293 	case WM_T_82547:
   11294 	case WM_T_82547_2:
   11295 		/* Set dummy value to access EEPROM */
   11296 		sc->sc_nvm_wordsize = 64;
   11297 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11298 		reg = data;
   11299 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11300 		if (size == 0)
    11301 			size = 6; /* 64 words */
   11302 		else
   11303 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11304 		break;
   11305 	case WM_T_80003:
   11306 	case WM_T_82571:
   11307 	case WM_T_82572:
   11308 	case WM_T_82573: /* SPI case */
   11309 	case WM_T_82574: /* SPI case */
   11310 	case WM_T_82583: /* SPI case */
   11311 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11312 		if (size > 14)
   11313 			size = 14;
   11314 		break;
   11315 	case WM_T_82575:
   11316 	case WM_T_82576:
   11317 	case WM_T_82580:
   11318 	case WM_T_I350:
   11319 	case WM_T_I354:
   11320 	case WM_T_I210:
   11321 	case WM_T_I211:
   11322 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11323 		if (size > 15)
   11324 			size = 15;
   11325 		break;
   11326 	default:
   11327 		aprint_error_dev(sc->sc_dev,
    11328 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
    11329 		return -1;
   11331 	}
   11332 
   11333 	sc->sc_nvm_wordsize = 1 << size;
   11334 
   11335 	return 0;
   11336 }
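
/*
 * Worked example (editor's note, assuming the usual value
 * NVM_WORD_SIZE_BASE_SHIFT == 6): an EECD size field of 2 on an 82571
 * yields size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words.
 * The 82541/82547 path instead re-reads the size field from EEPROM
 * word NVM_OFF_EEPROM_SIZE, using a temporary 64-word size for that
 * bootstrap read.
 */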
   11337 
   11338 /*
   11339  * wm_nvm_ready_spi:
   11340  *
   11341  *	Wait for a SPI EEPROM to be ready for commands.
   11342  */
   11343 static int
   11344 wm_nvm_ready_spi(struct wm_softc *sc)
   11345 {
   11346 	uint32_t val;
   11347 	int usec;
   11348 
   11349 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11350 		device_xname(sc->sc_dev), __func__));
   11351 
   11352 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11353 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11354 		wm_eeprom_recvbits(sc, &val, 8);
   11355 		if ((val & SPI_SR_RDY) == 0)
   11356 			break;
   11357 	}
   11358 	if (usec >= SPI_MAX_RETRIES) {
    11359 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11360 		return 1;
   11361 	}
   11362 	return 0;
   11363 }
   11364 
   11365 /*
   11366  * wm_nvm_read_spi:
   11367  *
    11368  *	Read a word from the EEPROM using the SPI protocol.
   11369  */
   11370 static int
   11371 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11372 {
   11373 	uint32_t reg, val;
   11374 	int i;
   11375 	uint8_t opc;
   11376 
   11377 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11378 		device_xname(sc->sc_dev), __func__));
   11379 
   11380 	/* Clear SK and CS. */
   11381 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11382 	CSR_WRITE(sc, WMREG_EECD, reg);
   11383 	CSR_WRITE_FLUSH(sc);
   11384 	delay(2);
   11385 
   11386 	if (wm_nvm_ready_spi(sc))
   11387 		return 1;
   11388 
   11389 	/* Toggle CS to flush commands. */
   11390 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11391 	CSR_WRITE_FLUSH(sc);
   11392 	delay(2);
   11393 	CSR_WRITE(sc, WMREG_EECD, reg);
   11394 	CSR_WRITE_FLUSH(sc);
   11395 	delay(2);
   11396 
   11397 	opc = SPI_OPC_READ;
   11398 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11399 		opc |= SPI_OPC_A8;
   11400 
   11401 	wm_eeprom_sendbits(sc, opc, 8);
   11402 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11403 
   11404 	for (i = 0; i < wordcnt; i++) {
   11405 		wm_eeprom_recvbits(sc, &val, 16);
   11406 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11407 	}
   11408 
   11409 	/* Raise CS and clear SK. */
   11410 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11411 	CSR_WRITE(sc, WMREG_EECD, reg);
   11412 	CSR_WRITE_FLUSH(sc);
   11413 	delay(2);
   11414 
   11415 	return 0;
   11416 }
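
/*
 * Editor's sketch (example only): the SPI part shifts each word out
 * most-significant byte first, so the loop above swaps the two bytes
 * into the order stored in the EEPROM:
 */
#if 0	/* example only */
	uint32_t val = 0x1234;	/* as received: 0x12 first, then 0x34 */
	uint16_t word = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	/* word == 0x3412 */
#endif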
   11417 
    11418 /* Reading via the EERD register */
   11419 
   11420 static int
   11421 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11422 {
   11423 	uint32_t attempts = 100000;
   11424 	uint32_t i, reg = 0;
   11425 	int32_t done = -1;
   11426 
   11427 	for (i = 0; i < attempts; i++) {
   11428 		reg = CSR_READ(sc, rw);
   11429 
   11430 		if (reg & EERD_DONE) {
   11431 			done = 0;
   11432 			break;
   11433 		}
   11434 		delay(5);
   11435 	}
   11436 
   11437 	return done;
   11438 }
   11439 
   11440 static int
   11441 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11442     uint16_t *data)
   11443 {
   11444 	int i, eerd = 0;
   11445 	int error = 0;
   11446 
   11447 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11448 		device_xname(sc->sc_dev), __func__));
   11449 
   11450 	for (i = 0; i < wordcnt; i++) {
   11451 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11452 
   11453 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11454 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11455 		if (error != 0)
   11456 			break;
   11457 
   11458 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11459 	}
   11460 
   11461 	return error;
   11462 }
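
/*
 * Editor's sketch (example only): one EERD transaction is "write the
 * word address plus START, poll DONE, read the data field".  For NVM
 * word 5:
 */
#if 0	/* example only */
	uint32_t eerd = (5 << EERD_ADDR_SHIFT) | EERD_START;
	uint16_t word;

	CSR_WRITE(sc, WMREG_EERD, eerd);
	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
		delay(5);	/* wm_poll_eerd_eewr_done() bounds this wait */
	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
#endif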
   11463 
   11464 /* Flash */
   11465 
   11466 static int
   11467 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11468 {
   11469 	uint32_t eecd;
   11470 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11471 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11472 	uint8_t sig_byte = 0;
   11473 
   11474 	switch (sc->sc_type) {
   11475 	case WM_T_PCH_SPT:
   11476 		/*
   11477 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11478 		 * sector valid bits from the NVM.
   11479 		 */
   11480 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11481 		if ((*bank == 0) || (*bank == 1)) {
   11482 			aprint_error_dev(sc->sc_dev,
   11483 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11484 				*bank);
   11485 			return -1;
   11486 		} else {
   11487 			*bank = *bank - 2;
   11488 			return 0;
   11489 		}
   11490 	case WM_T_ICH8:
   11491 	case WM_T_ICH9:
   11492 		eecd = CSR_READ(sc, WMREG_EECD);
   11493 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11494 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11495 			return 0;
   11496 		}
   11497 		/* FALLTHROUGH */
   11498 	default:
   11499 		/* Default to 0 */
   11500 		*bank = 0;
   11501 
   11502 		/* Check bank 0 */
   11503 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11504 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11505 			*bank = 0;
   11506 			return 0;
   11507 		}
   11508 
   11509 		/* Check bank 1 */
   11510 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11511 		    &sig_byte);
   11512 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11513 			*bank = 1;
   11514 			return 0;
   11515 		}
   11516 	}
   11517 
   11518 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11519 		device_xname(sc->sc_dev)));
   11520 	return -1;
   11521 }
   11522 
   11523 /******************************************************************************
   11524  * This function does initial flash setup so that a new read/write/erase cycle
   11525  * can be started.
   11526  *
   11527  * sc - The pointer to the hw structure
   11528  ****************************************************************************/
   11529 static int32_t
   11530 wm_ich8_cycle_init(struct wm_softc *sc)
   11531 {
   11532 	uint16_t hsfsts;
   11533 	int32_t error = 1;
   11534 	int32_t i     = 0;
   11535 
   11536 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11537 
    11538 	/* Check the Flash Descriptor Valid bit in the HW status register */
   11539 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11540 		return error;
   11541 	}
   11542 
    11543 	/* Clear FCERR and DAEL in the HW status register by writing a 1 */
    11544 	/* to each; both bits are write-1-to-clear. */
   11545 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11546 
   11547 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11548 
    11549 	/*
    11550 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11551 	 * check against in order to start a new cycle, or the FDONE bit
    11552 	 * should be changed in the hardware so that it is 1 after a
    11553 	 * hardware reset, which could then tell whether a cycle is in
    11554 	 * progress or has completed.  We should also have some software
    11555 	 * semaphore mechanism to guard FDONE or the cycle-in-progress
    11556 	 * bit so that accesses by two threads are serialized, or some
    11557 	 * way to keep two threads from starting a cycle at the same time.
    11558 	 */
   11559 
   11560 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11561 		/*
   11562 		 * There is no cycle running at present, so we can start a
   11563 		 * cycle
   11564 		 */
   11565 
   11566 		/* Begin by setting Flash Cycle Done. */
   11567 		hsfsts |= HSFSTS_DONE;
   11568 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11569 		error = 0;
   11570 	} else {
   11571 		/*
    11572 		 * Otherwise, poll for some time so the current cycle has a
   11573 		 * chance to end before giving up.
   11574 		 */
   11575 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11576 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11577 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11578 				error = 0;
   11579 				break;
   11580 			}
   11581 			delay(1);
   11582 		}
   11583 		if (error == 0) {
   11584 			/*
    11585 			 * The previous cycle ended within the timeout;
    11586 			 * now set the Flash Cycle Done bit.
   11587 			 */
   11588 			hsfsts |= HSFSTS_DONE;
   11589 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11590 		}
   11591 	}
   11592 	return error;
   11593 }
   11594 
   11595 /******************************************************************************
   11596  * This function starts a flash cycle and waits for its completion
   11597  *
   11598  * sc - The pointer to the hw structure
   11599  ****************************************************************************/
   11600 static int32_t
   11601 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11602 {
   11603 	uint16_t hsflctl;
   11604 	uint16_t hsfsts;
   11605 	int32_t error = 1;
   11606 	uint32_t i = 0;
   11607 
   11608 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11609 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11610 	hsflctl |= HSFCTL_GO;
   11611 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11612 
   11613 	/* Wait till FDONE bit is set to 1 */
   11614 	do {
   11615 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11616 		if (hsfsts & HSFSTS_DONE)
   11617 			break;
   11618 		delay(1);
   11619 		i++;
   11620 	} while (i < timeout);
    11621 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11622 		error = 0;
   11623 
   11624 	return error;
   11625 }
   11626 
   11627 /******************************************************************************
   11628  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11629  *
   11630  * sc - The pointer to the hw structure
   11631  * index - The index of the byte or word to read.
   11632  * size - Size of data to read, 1=byte 2=word, 4=dword
   11633  * data - Pointer to the word to store the value read.
   11634  *****************************************************************************/
   11635 static int32_t
   11636 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11637     uint32_t size, uint32_t *data)
   11638 {
   11639 	uint16_t hsfsts;
   11640 	uint16_t hsflctl;
   11641 	uint32_t flash_linear_address;
   11642 	uint32_t flash_data = 0;
   11643 	int32_t error = 1;
   11644 	int32_t count = 0;
   11645 
    11646 	if (size < 1 || size > 4 || data == NULL ||
   11647 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11648 		return error;
   11649 
   11650 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11651 	    sc->sc_ich8_flash_base;
   11652 
   11653 	do {
   11654 		delay(1);
   11655 		/* Steps */
   11656 		error = wm_ich8_cycle_init(sc);
   11657 		if (error)
   11658 			break;
   11659 
   11660 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11661 		/* The BCOUNT field encodes size - 1: 0b means 1 byte, 1b means 2. */
   11662 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11663 		    & HSFCTL_BCOUNT_MASK;
   11664 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11665 		if (sc->sc_type == WM_T_PCH_SPT) {
   11666 			/*
    11667 			 * In SPT, this register is in LAN memory space, not
    11668 			 * flash.  Therefore, only 32-bit access is supported.
   11669 			 */
   11670 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11671 			    (uint32_t)hsflctl);
   11672 		} else
   11673 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11674 
   11675 		/*
    11676 		 * Write the last 24 bits of the index into the Flash Linear
    11677 		 * Address field of the Flash Address register.
    11678 		 */
    11679 		/* TODO: maybe check the index against the size of the flash */
   11680 
   11681 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11682 
   11683 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11684 
    11685 		/*
    11686 		 * If FCERR is set, clear it and retry the whole sequence a
    11687 		 * few more times (up to ICH_FLASH_CYCLE_REPEAT_COUNT);
    11688 		 * otherwise read the result from Flash Data0, least
    11689 		 * significant byte first.
    11690 		 */
   11691 		if (error == 0) {
   11692 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11693 			if (size == 1)
   11694 				*data = (uint8_t)(flash_data & 0x000000FF);
   11695 			else if (size == 2)
   11696 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11697 			else if (size == 4)
   11698 				*data = (uint32_t)flash_data;
   11699 			break;
   11700 		} else {
   11701 			/*
   11702 			 * If we've gotten here, then things are probably
   11703 			 * completely hosed, but if the error condition is
   11704 			 * detected, it won't hurt to give it another try...
   11705 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11706 			 */
   11707 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11708 			if (hsfsts & HSFSTS_ERR) {
   11709 				/* Repeat for some time before giving up. */
   11710 				continue;
   11711 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11712 				break;
   11713 		}
   11714 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11715 
   11716 	return error;
   11717 }
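
/*
 * Editor's sketch (example only): HSFCTL's byte-count field encodes
 * size - 1, so a word read programs BCOUNT = 1 and a dword read
 * BCOUNT = 3 before the READ cycle type is set:
 */
#if 0	/* example only */
	uint32_t size = 2;	/* 16-bit access */
	uint16_t hsflctl = 0;

	hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
	hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
#endif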
   11718 
   11719 /******************************************************************************
   11720  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11721  *
    11722  * sc - pointer to the wm_softc structure
   11723  * index - The index of the byte to read.
   11724  * data - Pointer to a byte to store the value read.
   11725  *****************************************************************************/
   11726 static int32_t
   11727 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11728 {
   11729 	int32_t status;
   11730 	uint32_t word = 0;
   11731 
   11732 	status = wm_read_ich8_data(sc, index, 1, &word);
   11733 	if (status == 0)
   11734 		*data = (uint8_t)word;
   11735 	else
   11736 		*data = 0;
   11737 
   11738 	return status;
   11739 }
   11740 
   11741 /******************************************************************************
   11742  * Reads a word from the NVM using the ICH8 flash access registers.
   11743  *
    11744  * sc - pointer to the wm_softc structure
   11745  * index - The starting byte index of the word to read.
   11746  * data - Pointer to a word to store the value read.
   11747  *****************************************************************************/
   11748 static int32_t
   11749 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11750 {
   11751 	int32_t status;
   11752 	uint32_t word = 0;
   11753 
   11754 	status = wm_read_ich8_data(sc, index, 2, &word);
   11755 	if (status == 0)
   11756 		*data = (uint16_t)word;
   11757 	else
   11758 		*data = 0;
   11759 
   11760 	return status;
   11761 }
   11762 
   11763 /******************************************************************************
   11764  * Reads a dword from the NVM using the ICH8 flash access registers.
   11765  *
    11766  * sc - pointer to the wm_softc structure
    11767  * index - The starting byte index of the dword to read.
    11768  * data - Pointer to a dword to store the value read.
   11769  *****************************************************************************/
   11770 static int32_t
   11771 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11772 {
   11773 	int32_t status;
   11774 
   11775 	status = wm_read_ich8_data(sc, index, 4, data);
   11776 	return status;
   11777 }
   11778 
   11779 /******************************************************************************
   11780  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11781  * register.
   11782  *
   11783  * sc - Struct containing variables accessed by shared code
   11784  * offset - offset of word in the EEPROM to read
    11785  * words - number of words to read
    11786  * data - words read from the EEPROM
   11787  *****************************************************************************/
   11788 static int
   11789 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11790 {
   11791 	int32_t  error = 0;
   11792 	uint32_t flash_bank = 0;
   11793 	uint32_t act_offset = 0;
   11794 	uint32_t bank_offset = 0;
   11795 	uint16_t word = 0;
   11796 	uint16_t i = 0;
   11797 
   11798 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11799 		device_xname(sc->sc_dev), __func__));
   11800 
   11801 	/*
   11802 	 * We need to know which is the valid flash bank.  In the event
   11803 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11804 	 * managing flash_bank.  So it cannot be trusted and needs
   11805 	 * to be updated with each read.
   11806 	 */
   11807 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11808 	if (error) {
   11809 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11810 			device_xname(sc->sc_dev)));
   11811 		flash_bank = 0;
   11812 	}
   11813 
   11814 	/*
   11815 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11816 	 * size
   11817 	 */
   11818 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11819 
   11820 	error = wm_get_swfwhw_semaphore(sc);
   11821 	if (error) {
   11822 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11823 		    __func__);
   11824 		return error;
   11825 	}
   11826 
   11827 	for (i = 0; i < words; i++) {
   11828 		/* The NVM part needs a byte offset, hence * 2 */
   11829 		act_offset = bank_offset + ((offset + i) * 2);
   11830 		error = wm_read_ich8_word(sc, act_offset, &word);
   11831 		if (error) {
   11832 			aprint_error_dev(sc->sc_dev,
   11833 			    "%s: failed to read NVM\n", __func__);
   11834 			break;
   11835 		}
   11836 		data[i] = word;
   11837 	}
   11838 
   11839 	wm_put_swfwhw_semaphore(sc);
   11840 	return error;
   11841 }
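
/*
 * Worked example (editor's note, with a hypothetical bank size of
 * 0x1000 words): if flash_bank == 1, bank_offset is
 * 1 * (0x1000 * 2) = 0x2000 bytes, so NVM word 10 is fetched from
 * byte offset 0x2000 + 10 * 2 = 0x2014.
 */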
   11842 
   11843 /******************************************************************************
   11844  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11845  * register.
   11846  *
   11847  * sc - Struct containing variables accessed by shared code
   11848  * offset - offset of word in the EEPROM to read
    11849  * words - number of words to read
    11850  * data - words read from the EEPROM
   11851  *****************************************************************************/
   11852 static int
   11853 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11854 {
   11855 	int32_t  error = 0;
   11856 	uint32_t flash_bank = 0;
   11857 	uint32_t act_offset = 0;
   11858 	uint32_t bank_offset = 0;
   11859 	uint32_t dword = 0;
   11860 	uint16_t i = 0;
   11861 
   11862 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11863 		device_xname(sc->sc_dev), __func__));
   11864 
   11865 	/*
   11866 	 * We need to know which is the valid flash bank.  In the event
   11867 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11868 	 * managing flash_bank.  So it cannot be trusted and needs
   11869 	 * to be updated with each read.
   11870 	 */
   11871 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11872 	if (error) {
   11873 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11874 			device_xname(sc->sc_dev)));
   11875 		flash_bank = 0;
   11876 	}
   11877 
   11878 	/*
   11879 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11880 	 * size
   11881 	 */
   11882 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11883 
   11884 	error = wm_get_swfwhw_semaphore(sc);
   11885 	if (error) {
   11886 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11887 		    __func__);
   11888 		return error;
   11889 	}
   11890 
   11891 	for (i = 0; i < words; i++) {
   11892 		/* The NVM part needs a byte offset, hence * 2 */
   11893 		act_offset = bank_offset + ((offset + i) * 2);
   11894 		/* but we must read dword aligned, so mask ... */
   11895 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11896 		if (error) {
   11897 			aprint_error_dev(sc->sc_dev,
   11898 			    "%s: failed to read NVM\n", __func__);
   11899 			break;
   11900 		}
   11901 		/* ... and pick out low or high word */
   11902 		if ((act_offset & 0x2) == 0)
   11903 			data[i] = (uint16_t)(dword & 0xFFFF);
   11904 		else
   11905 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11906 	}
   11907 
   11908 	wm_put_swfwhw_semaphore(sc);
   11909 	return error;
   11910 }
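
/*
 * Editor's sketch (example only): SPT flash reads are dword-aligned,
 * so a word at byte offset 0x16 is fetched from 0x14 and taken from
 * the high half of the dword:
 */
#if 0	/* example only */
	uint32_t act_offset = 0x16, dword;
	uint16_t word;

	wm_read_ich8_dword(sc, act_offset & ~0x3, &dword); /* reads 0x14 */
	word = (act_offset & 0x2) ? (uint16_t)((dword >> 16) & 0xFFFF)
	    : (uint16_t)(dword & 0xFFFF);
#endif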
   11911 
   11912 /* iNVM */
   11913 
   11914 static int
   11915 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11916 {
    11917 	int32_t  rv = -1;	/* "not found" until a matching record is seen */
   11918 	uint32_t invm_dword;
   11919 	uint16_t i;
   11920 	uint8_t record_type, word_address;
   11921 
   11922 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11923 		device_xname(sc->sc_dev), __func__));
   11924 
   11925 	for (i = 0; i < INVM_SIZE; i++) {
   11926 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11927 		/* Get record type */
   11928 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11929 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11930 			break;
   11931 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11932 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11933 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11934 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11935 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11936 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11937 			if (word_address == address) {
   11938 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11939 				rv = 0;
   11940 				break;
   11941 			}
   11942 		}
   11943 	}
   11944 
   11945 	return rv;
   11946 }
   11947 
   11948 static int
   11949 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11950 {
   11951 	int rv = 0;
   11952 	int i;
   11953 
   11954 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11955 		device_xname(sc->sc_dev), __func__));
   11956 
   11957 	for (i = 0; i < words; i++) {
   11958 		switch (offset + i) {
   11959 		case NVM_OFF_MACADDR:
   11960 		case NVM_OFF_MACADDR1:
   11961 		case NVM_OFF_MACADDR2:
   11962 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11963 			if (rv != 0) {
   11964 				data[i] = 0xffff;
   11965 				rv = -1;
   11966 			}
   11967 			break;
   11968 		case NVM_OFF_CFG2:
   11969 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11970 			if (rv != 0) {
   11971 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11972 				rv = 0;
   11973 			}
   11974 			break;
   11975 		case NVM_OFF_CFG4:
   11976 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11977 			if (rv != 0) {
   11978 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11979 				rv = 0;
   11980 			}
   11981 			break;
   11982 		case NVM_OFF_LED_1_CFG:
   11983 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11984 			if (rv != 0) {
   11985 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11986 				rv = 0;
   11987 			}
   11988 			break;
   11989 		case NVM_OFF_LED_0_2_CFG:
   11990 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11991 			if (rv != 0) {
   11992 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11993 				rv = 0;
   11994 			}
   11995 			break;
   11996 		case NVM_OFF_ID_LED_SETTINGS:
   11997 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11998 			if (rv != 0) {
   11999 				*data = ID_LED_RESERVED_FFFF;
   12000 				rv = 0;
   12001 			}
   12002 			break;
   12003 		default:
   12004 			DPRINTF(WM_DEBUG_NVM,
    12005 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
   12006 			*data = NVM_RESERVED_WORD;
   12007 			break;
   12008 		}
   12009 	}
   12010 
   12011 	return rv;
   12012 }
   12013 
    12014 /* Locking, NVM type detection, checksum validation, version check and read */
   12015 
   12016 /*
   12017  * wm_nvm_acquire:
   12018  *
   12019  *	Perform the EEPROM handshake required on some chips.
   12020  */
   12021 static int
   12022 wm_nvm_acquire(struct wm_softc *sc)
   12023 {
   12024 	uint32_t reg;
   12025 	int x;
   12026 	int ret = 0;
   12027 
   12028 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12029 		device_xname(sc->sc_dev), __func__));
   12030 
   12031 	if (sc->sc_type >= WM_T_ICH8) {
   12032 		ret = wm_get_nvm_ich8lan(sc);
   12033 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   12034 		ret = wm_get_swfwhw_semaphore(sc);
   12035 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   12036 		/* This will also do wm_get_swsm_semaphore() if needed */
   12037 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   12038 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12039 		ret = wm_get_swsm_semaphore(sc);
   12040 	}
   12041 
   12042 	if (ret) {
   12043 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   12044 			__func__);
   12045 		return 1;
   12046 	}
   12047 
   12048 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12049 		reg = CSR_READ(sc, WMREG_EECD);
   12050 
   12051 		/* Request EEPROM access. */
   12052 		reg |= EECD_EE_REQ;
   12053 		CSR_WRITE(sc, WMREG_EECD, reg);
   12054 
   12055 		/* ..and wait for it to be granted. */
   12056 		for (x = 0; x < 1000; x++) {
   12057 			reg = CSR_READ(sc, WMREG_EECD);
   12058 			if (reg & EECD_EE_GNT)
   12059 				break;
   12060 			delay(5);
   12061 		}
   12062 		if ((reg & EECD_EE_GNT) == 0) {
   12063 			aprint_error_dev(sc->sc_dev,
   12064 			    "could not acquire EEPROM GNT\n");
   12065 			reg &= ~EECD_EE_REQ;
   12066 			CSR_WRITE(sc, WMREG_EECD, reg);
   12067 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12068 				wm_put_swfwhw_semaphore(sc);
   12069 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12070 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12071 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12072 				wm_put_swsm_semaphore(sc);
   12073 			return 1;
   12074 		}
   12075 	}
   12076 
   12077 	return 0;
   12078 }
   12079 
   12080 /*
   12081  * wm_nvm_release:
   12082  *
   12083  *	Release the EEPROM mutex.
   12084  */
   12085 static void
   12086 wm_nvm_release(struct wm_softc *sc)
   12087 {
   12088 	uint32_t reg;
   12089 
   12090 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12091 		device_xname(sc->sc_dev), __func__));
   12092 
   12093 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12094 		reg = CSR_READ(sc, WMREG_EECD);
   12095 		reg &= ~EECD_EE_REQ;
   12096 		CSR_WRITE(sc, WMREG_EECD, reg);
   12097 	}
   12098 
   12099 	if (sc->sc_type >= WM_T_ICH8) {
   12100 		wm_put_nvm_ich8lan(sc);
   12101 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12102 		wm_put_swfwhw_semaphore(sc);
   12103 	else if (sc->sc_flags & WM_F_LOCK_SWFW)
   12104 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12105 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12106 		wm_put_swsm_semaphore(sc);
   12107 }
   12108 
   12109 static int
   12110 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12111 {
   12112 	uint32_t eecd = 0;
   12113 
   12114 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12115 	    || sc->sc_type == WM_T_82583) {
   12116 		eecd = CSR_READ(sc, WMREG_EECD);
   12117 
   12118 		/* Isolate bits 15 & 16 */
   12119 		eecd = ((eecd >> 15) & 0x03);
   12120 
   12121 		/* If both bits are set, device is Flash type */
   12122 		if (eecd == 0x03)
   12123 			return 0;
   12124 	}
   12125 	return 1;
   12126 }
   12127 
   12128 static int
   12129 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12130 {
   12131 	uint32_t eec;
   12132 
   12133 	eec = CSR_READ(sc, WMREG_EEC);
   12134 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12135 		return 1;
   12136 
   12137 	return 0;
   12138 }
   12139 
   12140 /*
   12141  * wm_nvm_validate_checksum
   12142  *
   12143  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12144  */
   12145 static int
   12146 wm_nvm_validate_checksum(struct wm_softc *sc)
   12147 {
   12148 	uint16_t checksum;
   12149 	uint16_t eeprom_data;
   12150 #ifdef WM_DEBUG
   12151 	uint16_t csum_wordaddr, valid_checksum;
   12152 #endif
   12153 	int i;
   12154 
   12155 	checksum = 0;
   12156 
   12157 	/* Don't check for I211 */
   12158 	if (sc->sc_type == WM_T_I211)
   12159 		return 0;
   12160 
   12161 #ifdef WM_DEBUG
   12162 	if (sc->sc_type == WM_T_PCH_LPT) {
   12163 		csum_wordaddr = NVM_OFF_COMPAT;
   12164 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12165 	} else {
   12166 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12167 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12168 	}
   12169 
   12170 	/* Dump EEPROM image for debug */
   12171 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12172 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12173 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12174 		/* XXX PCH_SPT? */
   12175 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12176 		if ((eeprom_data & valid_checksum) == 0) {
   12177 			DPRINTF(WM_DEBUG_NVM,
   12178 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12179 				device_xname(sc->sc_dev), eeprom_data,
   12180 				    valid_checksum));
   12181 		}
   12182 	}
   12183 
   12184 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12185 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12186 		for (i = 0; i < NVM_SIZE; i++) {
   12187 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12188 				printf("XXXX ");
   12189 			else
   12190 				printf("%04hx ", eeprom_data);
   12191 			if (i % 8 == 7)
   12192 				printf("\n");
   12193 		}
   12194 	}
   12195 
   12196 #endif /* WM_DEBUG */
   12197 
   12198 	for (i = 0; i < NVM_SIZE; i++) {
   12199 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12200 			return 1;
   12201 		checksum += eeprom_data;
   12202 	}
   12203 
   12204 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12205 #ifdef WM_DEBUG
   12206 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12207 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12208 #endif
   12209 	}
   12210 
   12211 	return 0;
   12212 }
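
/*
 * Editor's sketch (example only): the sum of all NVM_SIZE words,
 * including the checksum word itself, must equal NVM_CHECKSUM
 * modulo 2^16:
 */
#if 0	/* example only */
	uint16_t sum = 0, word;
	int i;

	for (i = 0; i < NVM_SIZE; i++)
		if (wm_nvm_read(sc, i, 1, &word) == 0)
			sum += word;
	/* A valid image satisfies sum == NVM_CHECKSUM. */
#endif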
   12213 
   12214 static void
   12215 wm_nvm_version_invm(struct wm_softc *sc)
   12216 {
   12217 	uint32_t dword;
   12218 
   12219 	/*
    12220 	 * Linux's code to decode the version is very strange, so we don't
    12221 	 * follow that algorithm; we just use word 61 as the document
    12222 	 * describes.  That may not be perfect, though...
   12223 	 *
   12224 	 * Example:
   12225 	 *
   12226 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12227 	 */
   12228 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12229 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12230 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12231 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12232 }
   12233 
   12234 static void
   12235 wm_nvm_version(struct wm_softc *sc)
   12236 {
   12237 	uint16_t major, minor, build, patch;
   12238 	uint16_t uid0, uid1;
   12239 	uint16_t nvm_data;
   12240 	uint16_t off;
   12241 	bool check_version = false;
   12242 	bool check_optionrom = false;
   12243 	bool have_build = false;
   12244 	bool have_uid = true;
   12245 
   12246 	/*
   12247 	 * Version format:
   12248 	 *
   12249 	 * XYYZ
   12250 	 * X0YZ
   12251 	 * X0YY
   12252 	 *
   12253 	 * Example:
   12254 	 *
   12255 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12256 	 *	82571	0x50a6	5.10.6?
   12257 	 *	82572	0x506a	5.6.10?
   12258 	 *	82572EI	0x5069	5.6.9?
   12259 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12260 	 *		0x2013	2.1.3?
    12261 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12262 	 */
   12263 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12264 	switch (sc->sc_type) {
   12265 	case WM_T_82571:
   12266 	case WM_T_82572:
   12267 	case WM_T_82574:
   12268 	case WM_T_82583:
   12269 		check_version = true;
   12270 		check_optionrom = true;
   12271 		have_build = true;
   12272 		break;
   12273 	case WM_T_82575:
   12274 	case WM_T_82576:
   12275 	case WM_T_82580:
   12276 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12277 			check_version = true;
   12278 		break;
   12279 	case WM_T_I211:
   12280 		wm_nvm_version_invm(sc);
   12281 		have_uid = false;
   12282 		goto printver;
   12283 	case WM_T_I210:
   12284 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12285 			wm_nvm_version_invm(sc);
   12286 			have_uid = false;
   12287 			goto printver;
   12288 		}
   12289 		/* FALLTHROUGH */
   12290 	case WM_T_I350:
   12291 	case WM_T_I354:
   12292 		check_version = true;
   12293 		check_optionrom = true;
   12294 		break;
   12295 	default:
   12296 		return;
   12297 	}
   12298 	if (check_version) {
   12299 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12300 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12301 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12302 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12303 			build = nvm_data & NVM_BUILD_MASK;
   12304 			have_build = true;
   12305 		} else
   12306 			minor = nvm_data & 0x00ff;
   12307 
   12308 		/* Decimal */
   12309 		minor = (minor / 16) * 10 + (minor % 16);
   12310 		sc->sc_nvm_ver_major = major;
   12311 		sc->sc_nvm_ver_minor = minor;
   12312 
   12313 printver:
   12314 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12315 		    sc->sc_nvm_ver_minor);
   12316 		if (have_build) {
   12317 			sc->sc_nvm_ver_build = build;
   12318 			aprint_verbose(".%d", build);
   12319 		}
   12320 	}
   12321 	if (check_optionrom) {
   12322 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12323 		/* Option ROM Version */
   12324 		if ((off != 0x0000) && (off != 0xffff)) {
   12325 			off += NVM_COMBO_VER_OFF;
   12326 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12327 			wm_nvm_read(sc, off, 1, &uid0);
   12328 			if ((uid0 != 0) && (uid0 != 0xffff)
   12329 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12330 				/* 16bits */
   12331 				major = uid0 >> 8;
   12332 				build = (uid0 << 8) | (uid1 >> 8);
   12333 				patch = uid1 & 0x00ff;
   12334 				aprint_verbose(", option ROM Version %d.%d.%d",
   12335 				    major, build, patch);
   12336 			}
   12337 		}
   12338 	}
   12339 
   12340 	if (have_uid) {
   12341 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12342 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12343 	}
   12344 }
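
/*
 * Worked example (editor's note, assuming the usual
 * major/minor/build field layout): an 82572 with version word 0x506a
 * decodes as major = 0x5, minor = 0x06, build = 0xa; the
 * hex-as-decimal step (minor / 16) * 10 + (minor % 16) leaves
 * minor = 6, printing "5.6.10" and matching the table above.
 */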
   12345 
   12346 /*
   12347  * wm_nvm_read:
   12348  *
   12349  *	Read data from the serial EEPROM.
   12350  */
   12351 static int
   12352 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12353 {
   12354 	int rv;
   12355 
   12356 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12357 		device_xname(sc->sc_dev), __func__));
   12358 
   12359 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12360 		return 1;
   12361 
   12362 	if (wm_nvm_acquire(sc))
   12363 		return 1;
   12364 
   12365 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12366 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12367 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12368 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12369 	else if (sc->sc_type == WM_T_PCH_SPT)
   12370 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12371 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12372 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12373 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12374 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12375 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12376 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12377 	else
   12378 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12379 
   12380 	wm_nvm_release(sc);
   12381 	return rv;
   12382 }
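
/*
 * Editor's sketch (example only): callers go through wm_nvm_read(),
 * which acquires the NVM, dispatches to the matching backend above
 * and releases it again.  E.g. fetching the CFG2 word:
 */
#if 0	/* example only */
	uint16_t word;

	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &word) == 0)
		printf("CFG2 = %04x\n", word);
#endif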
   12383 
   12384 /*
   12385  * Hardware semaphores.
    12386  * Very complex...
   12387  */
   12388 
   12389 static int
   12390 wm_get_null(struct wm_softc *sc)
   12391 {
   12392 
   12393 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12394 		device_xname(sc->sc_dev), __func__));
   12395 	return 0;
   12396 }
   12397 
   12398 static void
   12399 wm_put_null(struct wm_softc *sc)
   12400 {
   12401 
   12402 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12403 		device_xname(sc->sc_dev), __func__));
   12404 	return;
   12405 }
   12406 
   12407 /*
   12408  * Get hardware semaphore.
   12409  * Same as e1000_get_hw_semaphore_generic()
   12410  */
   12411 static int
   12412 wm_get_swsm_semaphore(struct wm_softc *sc)
   12413 {
   12414 	int32_t timeout;
   12415 	uint32_t swsm;
   12416 
   12417 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12418 		device_xname(sc->sc_dev), __func__));
   12419 	KASSERT(sc->sc_nvm_wordsize > 0);
   12420 
   12421 	/* Get the SW semaphore. */
   12422 	timeout = sc->sc_nvm_wordsize + 1;
   12423 	while (timeout) {
   12424 		swsm = CSR_READ(sc, WMREG_SWSM);
   12425 
   12426 		if ((swsm & SWSM_SMBI) == 0)
   12427 			break;
   12428 
   12429 		delay(50);
   12430 		timeout--;
   12431 	}
   12432 
   12433 	if (timeout == 0) {
   12434 		aprint_error_dev(sc->sc_dev,
   12435 		    "could not acquire SWSM SMBI\n");
   12436 		return 1;
   12437 	}
   12438 
   12439 	/* Get the FW semaphore. */
   12440 	timeout = sc->sc_nvm_wordsize + 1;
   12441 	while (timeout) {
   12442 		swsm = CSR_READ(sc, WMREG_SWSM);
   12443 		swsm |= SWSM_SWESMBI;
   12444 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12445 		/* If we managed to set the bit we got the semaphore. */
   12446 		swsm = CSR_READ(sc, WMREG_SWSM);
   12447 		if (swsm & SWSM_SWESMBI)
   12448 			break;
   12449 
   12450 		delay(50);
   12451 		timeout--;
   12452 	}
   12453 
   12454 	if (timeout == 0) {
   12455 		aprint_error_dev(sc->sc_dev,
   12456 		    "could not acquire SWSM SWESMBI\n");
   12457 		/* Release semaphores */
   12458 		wm_put_swsm_semaphore(sc);
   12459 		return 1;
   12460 	}
   12461 	return 0;
   12462 }
   12463 
   12464 /*
   12465  * Put hardware semaphore.
   12466  * Same as e1000_put_hw_semaphore_generic()
   12467  */
   12468 static void
   12469 wm_put_swsm_semaphore(struct wm_softc *sc)
   12470 {
   12471 	uint32_t swsm;
   12472 
   12473 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12474 		device_xname(sc->sc_dev), __func__));
   12475 
   12476 	swsm = CSR_READ(sc, WMREG_SWSM);
   12477 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12478 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12479 }
   12480 
   12481 /*
   12482  * Get SW/FW semaphore.
   12483  * Same as e1000_acquire_swfw_sync_82575().
   12484  */
   12485 static int
   12486 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12487 {
   12488 	uint32_t swfw_sync;
   12489 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12490 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12491 	int timeout = 200;
   12492 
   12493 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12494 		device_xname(sc->sc_dev), __func__));
   12495 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12496 
   12497 	for (timeout = 0; timeout < 200; timeout++) {
   12498 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12499 			if (wm_get_swsm_semaphore(sc)) {
   12500 				aprint_error_dev(sc->sc_dev,
   12501 				    "%s: failed to get semaphore\n",
   12502 				    __func__);
   12503 				return 1;
   12504 			}
   12505 		}
   12506 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12507 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12508 			swfw_sync |= swmask;
   12509 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12510 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12511 				wm_put_swsm_semaphore(sc);
   12512 			return 0;
   12513 		}
   12514 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12515 			wm_put_swsm_semaphore(sc);
   12516 		delay(5000);
   12517 	}
   12518 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12519 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12520 	return 1;
   12521 }
   12522 
   12523 static void
   12524 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12525 {
   12526 	uint32_t swfw_sync;
   12527 
   12528 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12529 		device_xname(sc->sc_dev), __func__));
   12530 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12531 
   12532 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12533 		while (wm_get_swsm_semaphore(sc) != 0)
   12534 			continue;
   12535 	}
   12536 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12537 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12538 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12539 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12540 		wm_put_swsm_semaphore(sc);
   12541 }
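
/*
 * Editor's sketch (example only): SW/FW semaphore use is always a
 * get/put bracket around the protected access:
 */
#if 0	/* example only */
	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
		/* ... access the resource named by the mask ... */
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	}
#endif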
   12542 
   12543 static int
   12544 wm_get_phy_82575(struct wm_softc *sc)
   12545 {
   12546 
   12547 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12548 		device_xname(sc->sc_dev), __func__));
   12549 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12550 }
   12551 
   12552 static void
   12553 wm_put_phy_82575(struct wm_softc *sc)
   12554 {
   12555 
   12556 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12557 		device_xname(sc->sc_dev), __func__));
    12558 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12559 }
   12560 
   12561 static int
   12562 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12563 {
   12564 	uint32_t ext_ctrl;
   12565 	int timeout = 200;
   12566 
   12567 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12568 		device_xname(sc->sc_dev), __func__));
   12569 
   12570 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12571 	for (timeout = 0; timeout < 200; timeout++) {
   12572 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12573 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12574 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12575 
   12576 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12577 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12578 			return 0;
   12579 		delay(5000);
   12580 	}
   12581 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12582 	    device_xname(sc->sc_dev), ext_ctrl);
   12583 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12584 	return 1;
   12585 }
   12586 
   12587 static void
   12588 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12589 {
   12590 	uint32_t ext_ctrl;
   12591 
   12592 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12593 		device_xname(sc->sc_dev), __func__));
   12594 
   12595 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12596 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12597 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12598 
   12599 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12600 }
   12601 
   12602 static int
   12603 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12604 {
   12605 	uint32_t ext_ctrl;
   12606 	int timeout;
   12607 
   12608 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12609 		device_xname(sc->sc_dev), __func__));
   12610 	mutex_enter(sc->sc_ich_phymtx);
   12611 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12612 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12613 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12614 			break;
   12615 		delay(1000);
   12616 	}
   12617 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12618 		printf("%s: SW has already locked the resource\n",
   12619 		    device_xname(sc->sc_dev));
   12620 		goto out;
   12621 	}
   12622 
   12623 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12624 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12625 	for (timeout = 0; timeout < 1000; timeout++) {
   12626 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12627 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12628 			break;
   12629 		delay(1000);
   12630 	}
   12631 	if (timeout >= 1000) {
   12632 		printf("%s: failed to acquire semaphore\n",
   12633 		    device_xname(sc->sc_dev));
   12634 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12635 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12636 		goto out;
   12637 	}
   12638 	return 0;
   12639 
   12640 out:
   12641 	mutex_exit(sc->sc_ich_phymtx);
   12642 	return 1;
   12643 }
   12644 
   12645 static void
   12646 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12647 {
   12648 	uint32_t ext_ctrl;
   12649 
   12650 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12651 		device_xname(sc->sc_dev), __func__));
   12652 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12653 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12654 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12655 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12656 	} else {
   12657 		printf("%s: Semaphore unexpectedly released\n",
   12658 		    device_xname(sc->sc_dev));
   12659 	}
   12660 
   12661 	mutex_exit(sc->sc_ich_phymtx);
   12662 }
   12663 
   12664 static int
   12665 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12666 {
   12667 
   12668 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12669 		device_xname(sc->sc_dev), __func__));
   12670 	mutex_enter(sc->sc_ich_nvmmtx);
   12671 
   12672 	return 0;
   12673 }
   12674 
   12675 static void
   12676 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12677 {
   12678 
   12679 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12680 		device_xname(sc->sc_dev), __func__));
   12681 	mutex_exit(sc->sc_ich_nvmmtx);
   12682 }
   12683 
   12684 static int
   12685 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12686 {
   12687 	int i = 0;
   12688 	uint32_t reg;
   12689 
   12690 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12691 		device_xname(sc->sc_dev), __func__));
   12692 
   12693 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12694 	do {
   12695 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12696 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12697 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12698 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12699 			break;
   12700 		delay(2*1000);
   12701 		i++;
   12702 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12703 
   12704 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12705 		wm_put_hw_semaphore_82573(sc);
   12706 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12707 		    device_xname(sc->sc_dev));
   12708 		return -1;
   12709 	}
   12710 
   12711 	return 0;
   12712 }
   12713 
   12714 static void
   12715 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12716 {
   12717 	uint32_t reg;
   12718 
   12719 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12720 		device_xname(sc->sc_dev), __func__));
   12721 
   12722 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12723 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12724 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12725 }
   12726 
   12727 /*
   12728  * Management mode and power management related subroutines.
   12729  * BMC, AMT, suspend/resume and EEE.
   12730  */
   12731 
   12732 #ifdef WM_WOL
   12733 static int
   12734 wm_check_mng_mode(struct wm_softc *sc)
   12735 {
   12736 	int rv;
   12737 
   12738 	switch (sc->sc_type) {
   12739 	case WM_T_ICH8:
   12740 	case WM_T_ICH9:
   12741 	case WM_T_ICH10:
   12742 	case WM_T_PCH:
   12743 	case WM_T_PCH2:
   12744 	case WM_T_PCH_LPT:
   12745 	case WM_T_PCH_SPT:
   12746 		rv = wm_check_mng_mode_ich8lan(sc);
   12747 		break;
   12748 	case WM_T_82574:
   12749 	case WM_T_82583:
   12750 		rv = wm_check_mng_mode_82574(sc);
   12751 		break;
   12752 	case WM_T_82571:
   12753 	case WM_T_82572:
   12754 	case WM_T_82573:
   12755 	case WM_T_80003:
   12756 		rv = wm_check_mng_mode_generic(sc);
   12757 		break;
   12758 	default:
    12759 		/* nothing to do */
   12760 		rv = 0;
   12761 		break;
   12762 	}
   12763 
   12764 	return rv;
   12765 }
   12766 
   12767 static int
   12768 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12769 {
   12770 	uint32_t fwsm;
   12771 
   12772 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12773 
   12774 	if (((fwsm & FWSM_FW_VALID) != 0)
   12775 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12776 		return 1;
   12777 
   12778 	return 0;
   12779 }
   12780 
   12781 static int
   12782 wm_check_mng_mode_82574(struct wm_softc *sc)
   12783 {
   12784 	uint16_t data;
   12785 
   12786 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12787 
   12788 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12789 		return 1;
   12790 
   12791 	return 0;
   12792 }
   12793 
   12794 static int
   12795 wm_check_mng_mode_generic(struct wm_softc *sc)
   12796 {
   12797 	uint32_t fwsm;
   12798 
   12799 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12800 
   12801 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12802 		return 1;
   12803 
   12804 	return 0;
   12805 }
   12806 #endif /* WM_WOL */
   12807 
   12808 static int
   12809 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12810 {
   12811 	uint32_t manc, fwsm, factps;
   12812 
   12813 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12814 		return 0;
   12815 
   12816 	manc = CSR_READ(sc, WMREG_MANC);
   12817 
   12818 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12819 		device_xname(sc->sc_dev), manc));
   12820 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12821 		return 0;
   12822 
   12823 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12824 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12825 		factps = CSR_READ(sc, WMREG_FACTPS);
   12826 		if (((factps & FACTPS_MNGCG) == 0)
   12827 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12828 			return 1;
    12829 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   12830 		uint16_t data;
   12831 
   12832 		factps = CSR_READ(sc, WMREG_FACTPS);
   12833 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12834 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12835 			device_xname(sc->sc_dev), factps, data));
   12836 		if (((factps & FACTPS_MNGCG) == 0)
   12837 		    && ((data & NVM_CFG2_MNGM_MASK)
   12838 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12839 			return 1;
   12840 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12841 	    && ((manc & MANC_ASF_EN) == 0))
   12842 		return 1;
   12843 
   12844 	return 0;
   12845 }
   12846 
   12847 static bool
   12848 wm_phy_resetisblocked(struct wm_softc *sc)
   12849 {
   12850 	bool blocked = false;
   12851 	uint32_t reg;
   12852 	int i = 0;
   12853 
   12854 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12855 		device_xname(sc->sc_dev), __func__));
   12856 
   12857 	switch (sc->sc_type) {
   12858 	case WM_T_ICH8:
   12859 	case WM_T_ICH9:
   12860 	case WM_T_ICH10:
   12861 	case WM_T_PCH:
   12862 	case WM_T_PCH2:
   12863 	case WM_T_PCH_LPT:
   12864 	case WM_T_PCH_SPT:
   12865 		do {
   12866 			reg = CSR_READ(sc, WMREG_FWSM);
   12867 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12868 				blocked = true;
   12869 				delay(10*1000);
   12870 				continue;
   12871 			}
   12872 			blocked = false;
   12873 		} while (blocked && (i++ < 30));
   12874 		return blocked;
   12876 	case WM_T_82571:
   12877 	case WM_T_82572:
   12878 	case WM_T_82573:
   12879 	case WM_T_82574:
   12880 	case WM_T_82583:
   12881 	case WM_T_80003:
   12882 		reg = CSR_READ(sc, WMREG_MANC);
   12883 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12884 			return true;
   12885 		else
   12886 			return false;
   12888 	default:
   12889 		/* no problem */
   12890 		break;
   12891 	}
   12892 
   12893 	return false;
   12894 }
   12895 
   12896 static void
   12897 wm_get_hw_control(struct wm_softc *sc)
   12898 {
   12899 	uint32_t reg;
   12900 
   12901 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12902 		device_xname(sc->sc_dev), __func__));
   12903 
   12904 	if (sc->sc_type == WM_T_82573) {
   12905 		reg = CSR_READ(sc, WMREG_SWSM);
   12906 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12907 	} else if (sc->sc_type >= WM_T_82571) {
   12908 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12909 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12910 	}
   12911 }
   12912 
   12913 static void
   12914 wm_release_hw_control(struct wm_softc *sc)
   12915 {
   12916 	uint32_t reg;
   12917 
   12918 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12919 		device_xname(sc->sc_dev), __func__));
   12920 
   12921 	if (sc->sc_type == WM_T_82573) {
   12922 		reg = CSR_READ(sc, WMREG_SWSM);
   12923 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12924 	} else if (sc->sc_type >= WM_T_82571) {
   12925 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12926 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12927 	}
   12928 }
   12929 
   12930 static void
   12931 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12932 {
   12933 	uint32_t reg;
   12934 
   12935 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12936 		device_xname(sc->sc_dev), __func__));
   12937 
   12938 	if (sc->sc_type < WM_T_PCH2)
   12939 		return;
   12940 
   12941 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12942 
   12943 	if (gate)
   12944 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12945 	else
   12946 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12947 
   12948 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12949 }
   12950 
static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm, reg;
	int rv = 0;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
	wm_gate_hw_phy_config_ich8lan(sc, true);

	/* Disable ULP */
	wm_ulp_disable(sc);

	/* Acquire PHY semaphore */
	sc->phy.acquire(sc);

	fwsm = CSR_READ(sc, WMREG_FWSM);
	switch (sc->sc_type) {
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		if (wm_phy_is_accessible_pchlan(sc))
			break;

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_FORCE_SMBUS;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
#if 0
		/* XXX Isn't this required??? */
		CSR_WRITE_FLUSH(sc);
#endif
		delay(50 * 1000);
		/* FALLTHROUGH */
	case WM_T_PCH2:
		if (wm_phy_is_accessible_pchlan(sc) == true)
			break;
		/* FALLTHROUGH */
	case WM_T_PCH:
		if (sc->sc_type == WM_T_PCH)
			if ((fwsm & FWSM_FW_VALID) != 0)
				break;

		if (wm_phy_resetisblocked(sc) == true) {
			printf("XXX reset is blocked(3)\n");
			break;
		}

		wm_toggle_lanphypc_pch_lpt(sc);

		if (sc->sc_type >= WM_T_PCH_LPT) {
			if (wm_phy_is_accessible_pchlan(sc) == true)
				break;

			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

			if (wm_phy_is_accessible_pchlan(sc) == true)
				break;
			rv = -1;
		}
		break;
	default:
		break;
	}

	/* Release semaphore */
	sc->phy.release(sc);

	if (rv == 0) {
		if (wm_phy_resetisblocked(sc)) {
			printf("XXX reset is blocked(4)\n");
			goto out;
		}
		wm_reset_phy(sc);
		if (wm_phy_resetisblocked(sc))
			printf("XXX reset is blocked(5)\n");
	}

out:
	/*
	 * Ungate automatic PHY configuration by hardware on non-managed 82579
	 */
	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
		delay(10 * 1000);
		wm_gate_hw_phy_config_ich8lan(sc, false);
	}
}

static void
wm_init_manageability(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* Disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* Enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
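			/*
			 * Ports 623 and 624 are conventionally used by
			 * remote-management (ASF/RMCP-style) traffic, so
			 * pass such packets up to the host as well.
			 */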
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		manc |= MANC_ARP_EN;
		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		/* FALLTHROUGH */
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

	/*
	 * Note that the WOL flags are set after the EEPROM state has been
	 * reset.
	 */
}

/*
 * Unconfigure Ultra Low Power mode.
 * Only for I217 and newer (see below).
 */
static void
wm_ulp_disable(struct wm_softc *sc)
{
	uint32_t reg;
	int i = 0;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	/* Exclude old devices */
	if ((sc->sc_type < WM_T_PCH_LPT)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
		return;

	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
		/* Request ME to un-configure ULP mode in the PHY */
		reg = CSR_READ(sc, WMREG_H2ME);
		reg &= ~H2ME_ULP;
		reg |= H2ME_ENFORCE_SETTINGS;
		CSR_WRITE(sc, WMREG_H2ME, reg);

		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
			if (i++ == 30) {
				printf("%s timed out\n", __func__);
				return;
			}
			delay(10 * 1000);
		}
		reg = CSR_READ(sc, WMREG_H2ME);
		reg &= ~H2ME_ENFORCE_SETTINGS;
		CSR_WRITE(sc, WMREG_H2ME, reg);

		return;
	}

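	/*
	 * No manageability firmware is present, so the host has to take
	 * the PHY out of ULP itself: toggle LANPHYPC, unforce SMBus mode
	 * in the PHY and the MAC, re-enable K1, clear the sticky ULP
	 * configuration bits and commit the change with
	 * I218_ULP_CONFIG1_START (the sequence below).
	 */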
	/* Acquire semaphore */
	sc->phy.acquire(sc);

	/* Toggle LANPHYPC */
	wm_toggle_lanphypc_pch_lpt(sc);

	/* Unforce SMBus mode in PHY */
	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
	if (reg == 0x0000 || reg == 0xffff) {
		uint32_t reg2;

		printf("%s: Force SMBus first.\n", __func__);
		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
		reg2 |= CTRL_EXT_FORCE_SMBUS;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
		delay(50 * 1000);

		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
	}
	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);

	/* Unforce SMBus mode in MAC */
	reg = CSR_READ(sc, WMREG_CTRL_EXT);
	reg &= ~CTRL_EXT_FORCE_SMBUS;
	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
	reg |= HV_PM_CTRL_K1_ENA;
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);

	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
	reg &= ~(I218_ULP_CONFIG1_IND
	    | I218_ULP_CONFIG1_STICKY_ULP
	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
	    | I218_ULP_CONFIG1_WOL_HOST
	    | I218_ULP_CONFIG1_INBAND_EXIT
	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
	reg |= I218_ULP_CONFIG1_START;
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg &= ~FEXTNVM7_DIS_SMB_PERST;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	/* Release semaphore */
	sc->phy.release(sc);
	wm_gmii_reset(sc);
	delay(50 * 1000);
}

/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}

static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			/* Magic value: PHY page 768, register 25 */
			if ((child != NULL) && (child->mii_mpd_rev <= 2))
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444);
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

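	/* Arm the magic-packet wakeup filter. */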
	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type >= WM_T_PCH)
		wm_enable_phy_wakeup(sc);
	else {
		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}

/* LPLU */

static void
wm_lplu_d0_disable(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
}

static void
wm_lplu_d0_disable_pch(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
	reg |= HV_OEM_BITS_ANEGNOW;
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
}

/* EEE */

static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

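	/*
	 * IEEE 802.3az (Energy Efficient Ethernet): advertise EEE at
	 * 1G/100M and allow both directions of the link to enter Low
	 * Power Idle (LPI).
	 */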
	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		ipcnfg &= ~IPCNFG_10BASE_TE;
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */

/* Workaround for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
#if 0
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link speed is other than 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* Read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5 * 1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return;
#endif
}

/* Workaround: WOL from S5 stops working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
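		/*
		 * Pulse the Kumeran near-end loopback diagnostic bit (set,
		 * then clear).  This appears to resynchronize the Kumeran
		 * interface when gigabit speed is dropped; without it, WOL
		 * from S5 can stop working on IGP3 parts.
		 */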
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

/*
 * Workaround for PCH PHYs.
 * XXX Should this be moved to a new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH);

	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev. 2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
		 */
		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
			PHY_RESET(child);
			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
			    0x3140);
		}
	}

	/* Select page 0 */
	sc->phy.acquire(sc);
	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	sc->phy.release(sc);

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that it disables K1 when the link is at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH2);

	wm_set_mdio_slow_mode_hv(sc);
}

static int
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return -1;

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);
	sc->phy.release(sc);

	return 0;
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

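	/*
	 * Temporarily force the MAC speed (with the speed-select bypass
	 * set), flush, then restore the original CTRL/CTRL_EXT values;
	 * this appears to be needed for the new K1 setting to latch.
	 */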
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM;
	 * same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	int i;
	uint32_t reg;
	uint16_t id1, id2;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
		if (MII_INVALIDID(id1))
			continue;
		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
		if (MII_INVALIDID(id2))
			continue;
		break;
	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

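	/*
	 * The PHY may need to be in MDIO slow mode: drop the semaphore,
	 * switch to slow mode and retry the ID reads before giving up.
	 */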
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
		sc->phy.acquire(sc);
	}
	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		printf("XXX return with false\n");
		return false;
	}
out:
	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			/* Unforce SMBus mode in PHY */
			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL);
			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, reg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

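	/*
	 * Wait for the PHY to come back up: pre-LPT parts get a fixed
	 * 50ms; LPT and newer poll CTRL_EXT_LPCD for up to ~100ms and
	 * then settle for another 30ms.
	 */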
	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
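		/*
		 * Worked example (illustrative numbers only): with a 24KB
		 * Rx buffer, a 1500-byte MTU and a 1000Mb/s link,
		 * lat_ns = (24 * 1024 - 2 * (1500 + 14)) * 8 * 1000 / 1000
		 * = 172384 ns.  That value needs two divide-by-32 steps to
		 * fit in 10 bits (scale = 2, value = 169), so
		 * lat_enc = (2 << 10) | 169, i.e. about 173 us.
		 */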
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			printf("%s: Invalid LTR latency scale %d\n",
			    device_xname(sc->sc_dev), scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

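		/*
		 * Convert the clamped latency back into KB of Rx buffer
		 * consumed at line rate (the inverse of the computation
		 * above) to derive the OBFF high water mark.
		 */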
		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies are the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set the OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10:
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/*
	 * Get the Power Management cap offset first, so that an early
	 * return leaves every register untouched.
	 */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Save the WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
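
	/*
	 * Workaround loop (as implemented below): while the internal PHY
	 * reports an unconfigured PLL frequency, hold the PHY in reset,
	 * power it down, load the iNVM autoload word with the PLL
	 * workaround value, bounce the function through D3hot and back,
	 * then re-check.
	 */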
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore the WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore the MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}

static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH_SPT);

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}