/*	$NetBSD: if_wm.c,v 1.528 2017/07/18 08:22:55 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.528 2017/07/18 08:22:55 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts that this driver may use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
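/*
 * Illustrative note: WM_MAX_NINTR allows one MSI-X vector per queue pair
 * plus one extra vector, which is used for the link interrupt (see
 * wm_linkintr_msix() and sc_link_intr_idx below).
 */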

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
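/*
 * Illustrative note: because WM_NTXDESC() and WM_TXQUEUELEN() are powers
 * of two, the "& mask" in WM_NEXTTX()/WM_NEXTTXS() wraps the ring index
 * without a division; e.g. with 4096 descriptors the mask is 0xfff, so
 * WM_NEXTTX(txq, 4095) == (4096 & 0xfff) == 0.
 */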

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif
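/*
 * Note: these are presumably the defaults for sc_rx_process_limit (Rx
 * packets processed per softint pass) and sc_rx_intr_process_limit (Rx
 * packets processed directly in the H/W interrupt handler); see the
 * matching fields in struct wm_softc below.
 */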

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC];	/* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC];	/* 82575 and newer */
} rxdescs_t;
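/*
 * Illustrative note: the unions above let a single DMA allocation be
 * viewed as whichever descriptor layout the chip in use expects (legacy
 * "wiseman", extended, or newer-queue format); txq_descsize/rxq_descsize
 * select the active member's element size at runtime.
 */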

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
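/*
 * Note: this table is consumed by wm_rxpbs_adjust_82580() (declared
 * below), presumably to decode the 82580's RXPBS register field into a
 * packet buffer size.
 */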

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)
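/*
 * Illustrative example (not compiled): WM_Q_EVCNT_ATTACH(txq, txdw, txq,
 * 0, xname, EVCNT_TYPE_INTR) formats the counter name "txq00txdw" into
 * the per-queue name buffer declared by WM_Q_EVCNT_DEFINE() and attaches
 * the event counter under that name.
 */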

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue intermediates them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum)		/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum)		/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
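/*
 * Note: WM_CORE_LOCK()/WM_CORE_UNLOCK() are no-ops while sc_core_lock is
 * still NULL, and WM_CORE_LOCKED() evaluates to true in that case, so the
 * macros are safe to use before the mutex has been created.
 */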

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
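/*
 * Illustrative note: rxq_tailp always points at the m_next slot of the
 * last mbuf in the chain (or at rxq_head when the chain is empty), so
 * WM_RXCHAIN_LINK() appends an mbuf in O(1) without walking the chain.
 */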

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
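/*
 * Illustrative note: CSR_WRITE_FLUSH() reads back the STATUS register so
 * that any posted PCI write issued just before it actually reaches the
 * device before the driver continues; a common idiom after register
 * writes with side effects.
 */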

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
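/*
 * Illustrative note: descriptor ring base addresses are programmed into
 * the hardware as two 32-bit halves; the _LO/_HI macros split the bus
 * address accordingly, and the _HI half is simply 0 on systems with
 * 32-bit bus addresses.
 */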

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Reading via EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
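/*
 * Note: autoconfiguration glue registering the "wm" driver with
 * config(9), wiring up the match/attach/detach entry points;
 * DVF_DETACH_SHUTDOWN marks the device as detachable at shutdown time.
 */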

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
   1332 	  WM_T_82580,		WMP_F_FIBER },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1335 	  "82580 1000BaseT Ethernet (SERDES)",
   1336 	  WM_T_82580,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1339 	  "82580 gigabit Ethernet (SGMII)",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1342 	  "82580 dual-1000BaseT Ethernet",
   1343 	  WM_T_82580,		WMP_F_COPPER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1346 	  "82580 quad-1000BaseX Ethernet",
   1347 	  WM_T_82580,		WMP_F_FIBER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1350 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1351 	  WM_T_82580,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1354 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1355 	  WM_T_82580,		WMP_F_SERDES },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1358 	  "DH89XXCC 1000BASE-KX Ethernet",
   1359 	  WM_T_82580,		WMP_F_SERDES },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1362 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1363 	  WM_T_82580,		WMP_F_SERDES },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1366 	  "I350 Gigabit Network Connection",
   1367 	  WM_T_I350,		WMP_F_COPPER },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1370 	  "I350 Gigabit Fiber Network Connection",
   1371 	  WM_T_I350,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1374 	  "I350 Gigabit Backplane Connection",
   1375 	  WM_T_I350,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1378 	  "I350 Quad Port Gigabit Ethernet",
   1379 	  WM_T_I350,		WMP_F_SERDES },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1382 	  "I350 Gigabit Connection",
   1383 	  WM_T_I350,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1386 	  "I354 Gigabit Ethernet (KX)",
   1387 	  WM_T_I354,		WMP_F_SERDES },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1390 	  "I354 Gigabit Ethernet (SGMII)",
   1391 	  WM_T_I354,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1394 	  "I354 Gigabit Ethernet (2.5G)",
   1395 	  WM_T_I354,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1398 	  "I210-T1 Ethernet Server Adapter",
   1399 	  WM_T_I210,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1402 	  "I210 Ethernet (Copper OEM)",
   1403 	  WM_T_I210,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1406 	  "I210 Ethernet (Copper IT)",
   1407 	  WM_T_I210,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1410 	  "I210 Ethernet (FLASH less)",
   1411 	  WM_T_I210,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1414 	  "I210 Gigabit Ethernet (Fiber)",
   1415 	  WM_T_I210,		WMP_F_FIBER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1418 	  "I210 Gigabit Ethernet (SERDES)",
   1419 	  WM_T_I210,		WMP_F_SERDES },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1422 	  "I210 Gigabit Ethernet (FLASH less)",
   1423 	  WM_T_I210,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1426 	  "I210 Gigabit Ethernet (SGMII)",
   1427 	  WM_T_I210,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1430 	  "I211 Ethernet (COPPER)",
   1431 	  WM_T_I211,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1433 	  "I217 V Ethernet Connection",
   1434 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1436 	  "I217 LM Ethernet Connection",
   1437 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1439 	  "I218 V Ethernet Connection",
   1440 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1442 	  "I218 V Ethernet Connection",
   1443 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1445 	  "I218 V Ethernet Connection",
   1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1448 	  "I218 LM Ethernet Connection",
   1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1451 	  "I218 LM Ethernet Connection",
   1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1454 	  "I218 LM Ethernet Connection",
   1455 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1456 #if 0
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1458 	  "I219 V Ethernet Connection",
   1459 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1461 	  "I219 V Ethernet Connection",
   1462 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1464 	  "I219 V Ethernet Connection",
   1465 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1467 	  "I219 V Ethernet Connection",
   1468 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1470 	  "I219 LM Ethernet Connection",
   1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1473 	  "I219 LM Ethernet Connection",
   1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1476 	  "I219 LM Ethernet Connection",
   1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1479 	  "I219 LM Ethernet Connection",
   1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1482 	  "I219 LM Ethernet Connection",
   1483 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1484 #endif
   1485 	{ 0,			0,
   1486 	  NULL,
   1487 	  0,			0 },
   1488 };
   1489 
   1490 /*
   1491  * Register read/write functions.
   1492  * Other than CSR_{READ|WRITE}().
   1493  */
   1494 
   1495 #if 0 /* Not currently used */
   1496 static inline uint32_t
   1497 wm_io_read(struct wm_softc *sc, int reg)
   1498 {
   1499 
   1500 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1501 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1502 }
   1503 #endif
   1504 
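/*
 * Indirect I/O-space access: the target register offset is written at
 * BAR offset 0 and the data is transferred at BAR offset 4 (the
 * IOADDR/IODATA pair in Intel's documentation).
 */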
   1505 static inline void
   1506 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1507 {
   1508 
   1509 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1510 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1511 }
   1512 
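/*
 * Write one byte of an 8-bit controller register: the data and the
 * byte offset within the register are packed into a single CSR write,
 * then SCTL_CTL_READY is polled to confirm the hardware latched it.
 */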
   1513 static inline void
   1514 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1515     uint32_t data)
   1516 {
   1517 	uint32_t regval;
   1518 	int i;
   1519 
   1520 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1521 
   1522 	CSR_WRITE(sc, reg, regval);
   1523 
   1524 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1525 		delay(5);
   1526 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1527 			break;
   1528 	}
   1529 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1530 		aprint_error("%s: WARNING:"
   1531 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1532 		    device_xname(sc->sc_dev), reg);
   1533 	}
   1534 }
   1535 
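/*
 * Split a bus address into the low/high words of a descriptor address
 * field; on platforms with a 32-bit bus_addr_t the high word is zero.
 */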
   1536 static inline void
   1537 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1538 {
   1539 	wa->wa_low = htole32(v & 0xffffffffU);
   1540 	if (sizeof(bus_addr_t) == 8)
   1541 		wa->wa_high = htole32((uint64_t) v >> 32);
   1542 	else
   1543 		wa->wa_high = 0;
   1544 }
   1545 
   1546 /*
   1547  * Descriptor sync/init functions.
   1548  */
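/*
 * Sync [start, start + num) of the Tx descriptor ring; a range that
 * wraps past the end of the ring is synced as two pieces.
 */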
   1549 static inline void
   1550 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1551 {
   1552 	struct wm_softc *sc = txq->txq_sc;
   1553 
   1554 	/* If it will wrap around, sync to the end of the ring. */
   1555 	if ((start + num) > WM_NTXDESC(txq)) {
   1556 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1557 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1558 		    (WM_NTXDESC(txq) - start), ops);
   1559 		num -= (WM_NTXDESC(txq) - start);
   1560 		start = 0;
   1561 	}
   1562 
   1563 	/* Now sync whatever is left. */
   1564 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1565 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1566 }
   1567 
   1568 static inline void
   1569 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1570 {
   1571 	struct wm_softc *sc = rxq->rxq_sc;
   1572 
   1573 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1574 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1575 }
   1576 
   1577 static inline void
   1578 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1579 {
   1580 	struct wm_softc *sc = rxq->rxq_sc;
   1581 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1582 	struct mbuf *m = rxs->rxs_mbuf;
   1583 
   1584 	/*
   1585 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1586 	 * so that the payload after the Ethernet header is aligned
   1587 	 * to a 4-byte boundary.
    1588 	 *
   1589 	 * XXX BRAINDAMAGE ALERT!
   1590 	 * The stupid chip uses the same size for every buffer, which
   1591 	 * is set in the Receive Control register.  We are using the 2K
   1592 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1593 	 * reason, we can't "scoot" packets longer than the standard
   1594 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1595 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1596 	 * the upper layer copy the headers.
   1597 	 */
   1598 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1599 
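	/*
	 * Three descriptor layouts are in use: the 82574 extended
	 * format, the advanced format on "new queue" (82575 and later)
	 * chips, and the legacy wiseman format for everything else.
	 */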
   1600 	if (sc->sc_type == WM_T_82574) {
   1601 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1602 		rxd->erx_data.erxd_addr =
   1603 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1604 		rxd->erx_data.erxd_dd = 0;
   1605 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1606 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1607 
   1608 		rxd->nqrx_data.nrxd_paddr =
   1609 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1610 		/* Currently, split header is not supported. */
   1611 		rxd->nqrx_data.nrxd_haddr = 0;
   1612 	} else {
   1613 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1614 
   1615 		wm_set_dma_addr(&rxd->wrx_addr,
   1616 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1617 		rxd->wrx_len = 0;
   1618 		rxd->wrx_cksum = 0;
   1619 		rxd->wrx_status = 0;
   1620 		rxd->wrx_errors = 0;
   1621 		rxd->wrx_special = 0;
   1622 	}
   1623 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1624 
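	/* Hand the descriptor back to the hardware by updating the tail */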
   1625 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1626 }
   1627 
   1628 /*
   1629  * Device driver interface functions and commonly used functions.
   1630  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1631  */
   1632 
   1633 /* Lookup supported device table */
   1634 static const struct wm_product *
   1635 wm_lookup(const struct pci_attach_args *pa)
   1636 {
   1637 	const struct wm_product *wmp;
   1638 
   1639 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1640 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1641 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1642 			return wmp;
   1643 	}
   1644 	return NULL;
   1645 }
   1646 
   1647 /* The match function (ca_match) */
   1648 static int
   1649 wm_match(device_t parent, cfdata_t cf, void *aux)
   1650 {
   1651 	struct pci_attach_args *pa = aux;
   1652 
   1653 	if (wm_lookup(pa) != NULL)
   1654 		return 1;
   1655 
   1656 	return 0;
   1657 }
   1658 
   1659 /* The attach function (ca_attach) */
   1660 static void
   1661 wm_attach(device_t parent, device_t self, void *aux)
   1662 {
   1663 	struct wm_softc *sc = device_private(self);
   1664 	struct pci_attach_args *pa = aux;
   1665 	prop_dictionary_t dict;
   1666 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1667 	pci_chipset_tag_t pc = pa->pa_pc;
   1668 	int counts[PCI_INTR_TYPE_SIZE];
   1669 	pci_intr_type_t max_type;
   1670 	const char *eetype, *xname;
   1671 	bus_space_tag_t memt;
   1672 	bus_space_handle_t memh;
   1673 	bus_size_t memsize;
   1674 	int memh_valid;
   1675 	int i, error;
   1676 	const struct wm_product *wmp;
   1677 	prop_data_t ea;
   1678 	prop_number_t pn;
   1679 	uint8_t enaddr[ETHER_ADDR_LEN];
   1680 	char buf[256];
   1681 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1682 	pcireg_t preg, memtype;
   1683 	uint16_t eeprom_data, apme_mask;
   1684 	bool force_clear_smbi;
   1685 	uint32_t link_mode;
   1686 	uint32_t reg;
   1687 
   1688 	sc->sc_dev = self;
   1689 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1690 	sc->sc_core_stopping = false;
   1691 
   1692 	wmp = wm_lookup(pa);
   1693 #ifdef DIAGNOSTIC
   1694 	if (wmp == NULL) {
   1695 		printf("\n");
   1696 		panic("wm_attach: impossible");
   1697 	}
   1698 #endif
   1699 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1700 
   1701 	sc->sc_pc = pa->pa_pc;
   1702 	sc->sc_pcitag = pa->pa_tag;
   1703 
   1704 	if (pci_dma64_available(pa))
   1705 		sc->sc_dmat = pa->pa_dmat64;
   1706 	else
   1707 		sc->sc_dmat = pa->pa_dmat;
   1708 
   1709 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1710 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1711 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1712 
   1713 	sc->sc_type = wmp->wmp_type;
   1714 
   1715 	/* Set default function pointers */
   1716 	sc->phy.acquire = wm_get_null;
   1717 	sc->phy.release = wm_put_null;
   1718 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1719 
   1720 	if (sc->sc_type < WM_T_82543) {
   1721 		if (sc->sc_rev < 2) {
   1722 			aprint_error_dev(sc->sc_dev,
   1723 			    "i82542 must be at least rev. 2\n");
   1724 			return;
   1725 		}
   1726 		if (sc->sc_rev < 3)
   1727 			sc->sc_type = WM_T_82542_2_0;
   1728 	}
   1729 
   1730 	/*
   1731 	 * Disable MSI for Errata:
   1732 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1733 	 *
   1734 	 *  82544: Errata 25
   1735 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1736 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1737 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1738 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1739 	 *
   1740 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1741 	 *
   1742 	 *  82571 & 82572: Errata 63
   1743 	 */
   1744 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1745 	    || (sc->sc_type == WM_T_82572))
   1746 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1747 
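	/* 82575 and newer chips use the advanced ("new queue") descriptors */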
   1748 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1749 	    || (sc->sc_type == WM_T_82580)
   1750 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1751 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1752 		sc->sc_flags |= WM_F_NEWQUEUE;
   1753 
   1754 	/* Set device properties (mactype) */
   1755 	dict = device_properties(sc->sc_dev);
   1756 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1757 
   1758 	/*
    1759 	 * Map the device.  All devices support memory-mapped access,
   1760 	 * and it is really required for normal operation.
   1761 	 */
   1762 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1763 	switch (memtype) {
   1764 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1765 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1766 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1767 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1768 		break;
   1769 	default:
   1770 		memh_valid = 0;
   1771 		break;
   1772 	}
   1773 
   1774 	if (memh_valid) {
   1775 		sc->sc_st = memt;
   1776 		sc->sc_sh = memh;
   1777 		sc->sc_ss = memsize;
   1778 	} else {
   1779 		aprint_error_dev(sc->sc_dev,
   1780 		    "unable to map device registers\n");
   1781 		return;
   1782 	}
   1783 
   1784 	/*
   1785 	 * In addition, i82544 and later support I/O mapped indirect
   1786 	 * register access.  It is not desirable (nor supported in
   1787 	 * this driver) to use it for normal operation, though it is
   1788 	 * required to work around bugs in some chip versions.
   1789 	 */
   1790 	if (sc->sc_type >= WM_T_82544) {
   1791 		/* First we have to find the I/O BAR. */
   1792 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1793 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1794 			if (memtype == PCI_MAPREG_TYPE_IO)
   1795 				break;
   1796 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1797 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1798 				i += 4;	/* skip high bits, too */
   1799 		}
   1800 		if (i < PCI_MAPREG_END) {
   1801 			/*
    1802 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1803 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1804 			 * That's not a problem, because those newer chips
    1805 			 * don't have this bug.
    1806 			 *
    1807 			 * The i8254x apparently doesn't respond when the
    1808 			 * I/O BAR is 0, which looks somewhat like it
    1809 			 * hasn't been configured.
   1810 			 */
   1811 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1812 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1813 				aprint_error_dev(sc->sc_dev,
   1814 				    "WARNING: I/O BAR at zero.\n");
   1815 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1816 					0, &sc->sc_iot, &sc->sc_ioh,
   1817 					NULL, &sc->sc_ios) == 0) {
   1818 				sc->sc_flags |= WM_F_IOH_VALID;
   1819 			} else {
   1820 				aprint_error_dev(sc->sc_dev,
   1821 				    "WARNING: unable to map I/O space\n");
   1822 			}
   1823 		}
   1824 
   1825 	}
   1826 
   1827 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1828 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1829 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1830 	if (sc->sc_type < WM_T_82542_2_1)
   1831 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1832 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1833 
   1834 	/* power up chip */
   1835 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1836 	    NULL)) && error != EOPNOTSUPP) {
   1837 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1838 		return;
   1839 	}
   1840 
   1841 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1842 
   1843 	/* Allocation settings */
   1844 	max_type = PCI_INTR_TYPE_MSIX;
   1845 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1846 	counts[PCI_INTR_TYPE_MSI] = 1;
   1847 	counts[PCI_INTR_TYPE_INTX] = 1;
   1848 	/* overridden by disable flags */
   1849 	if (wm_disable_msi != 0) {
   1850 		counts[PCI_INTR_TYPE_MSI] = 0;
   1851 		if (wm_disable_msix != 0) {
   1852 			max_type = PCI_INTR_TYPE_INTX;
   1853 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1854 		}
   1855 	} else if (wm_disable_msix != 0) {
   1856 		max_type = PCI_INTR_TYPE_MSI;
   1857 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1858 	}
   1859 
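	/*
	 * Fall back from the best interrupt type to the worst: MSI-X
	 * (one vector per queue plus one for link status), then MSI,
	 * then INTx.  On each setup failure the allocated vectors are
	 * released and the allocation is retried with the next type.
	 */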
   1860 alloc_retry:
   1861 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1862 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1863 		return;
   1864 	}
   1865 
   1866 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1867 		error = wm_setup_msix(sc);
   1868 		if (error) {
   1869 			pci_intr_release(pc, sc->sc_intrs,
   1870 			    counts[PCI_INTR_TYPE_MSIX]);
   1871 
   1872 			/* Setup for MSI: Disable MSI-X */
   1873 			max_type = PCI_INTR_TYPE_MSI;
   1874 			counts[PCI_INTR_TYPE_MSI] = 1;
   1875 			counts[PCI_INTR_TYPE_INTX] = 1;
   1876 			goto alloc_retry;
   1877 		}
    1878 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1879 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1880 		error = wm_setup_legacy(sc);
   1881 		if (error) {
   1882 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1883 			    counts[PCI_INTR_TYPE_MSI]);
   1884 
   1885 			/* The next try is for INTx: Disable MSI */
   1886 			max_type = PCI_INTR_TYPE_INTX;
   1887 			counts[PCI_INTR_TYPE_INTX] = 1;
   1888 			goto alloc_retry;
   1889 		}
   1890 	} else {
   1891 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1892 		error = wm_setup_legacy(sc);
   1893 		if (error) {
   1894 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1895 			    counts[PCI_INTR_TYPE_INTX]);
   1896 			return;
   1897 		}
   1898 	}
   1899 
   1900 	/*
   1901 	 * Check the function ID (unit number of the chip).
   1902 	 */
   1903 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1904 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1905 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1906 	    || (sc->sc_type == WM_T_82580)
   1907 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1908 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1909 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1910 	else
   1911 		sc->sc_funcid = 0;
   1912 
   1913 	/*
   1914 	 * Determine a few things about the bus we're connected to.
   1915 	 */
   1916 	if (sc->sc_type < WM_T_82543) {
   1917 		/* We don't really know the bus characteristics here. */
   1918 		sc->sc_bus_speed = 33;
   1919 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1920 		/*
    1921 		 * CSA (Communication Streaming Architecture) is about as
    1922 		 * fast as a 32-bit, 66MHz PCI bus.
   1923 		 */
   1924 		sc->sc_flags |= WM_F_CSA;
   1925 		sc->sc_bus_speed = 66;
   1926 		aprint_verbose_dev(sc->sc_dev,
   1927 		    "Communication Streaming Architecture\n");
   1928 		if (sc->sc_type == WM_T_82547) {
   1929 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1930 			callout_setfunc(&sc->sc_txfifo_ch,
   1931 					wm_82547_txfifo_stall, sc);
   1932 			aprint_verbose_dev(sc->sc_dev,
   1933 			    "using 82547 Tx FIFO stall work-around\n");
   1934 		}
   1935 	} else if (sc->sc_type >= WM_T_82571) {
   1936 		sc->sc_flags |= WM_F_PCIE;
   1937 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1938 		    && (sc->sc_type != WM_T_ICH10)
   1939 		    && (sc->sc_type != WM_T_PCH)
   1940 		    && (sc->sc_type != WM_T_PCH2)
   1941 		    && (sc->sc_type != WM_T_PCH_LPT)
   1942 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1943 			/* ICH* and PCH* have no PCIe capability registers */
   1944 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1945 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1946 				NULL) == 0)
   1947 				aprint_error_dev(sc->sc_dev,
   1948 				    "unable to find PCIe capability\n");
   1949 		}
   1950 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1951 	} else {
   1952 		reg = CSR_READ(sc, WMREG_STATUS);
   1953 		if (reg & STATUS_BUS64)
   1954 			sc->sc_flags |= WM_F_BUS64;
   1955 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1956 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1957 
   1958 			sc->sc_flags |= WM_F_PCIX;
   1959 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1960 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1961 				aprint_error_dev(sc->sc_dev,
   1962 				    "unable to find PCIX capability\n");
   1963 			else if (sc->sc_type != WM_T_82545_3 &&
   1964 				 sc->sc_type != WM_T_82546_3) {
   1965 				/*
   1966 				 * Work around a problem caused by the BIOS
   1967 				 * setting the max memory read byte count
   1968 				 * incorrectly.
   1969 				 */
   1970 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1971 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1972 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1973 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1974 
   1975 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1976 				    PCIX_CMD_BYTECNT_SHIFT;
   1977 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1978 				    PCIX_STATUS_MAXB_SHIFT;
   1979 				if (bytecnt > maxb) {
   1980 					aprint_verbose_dev(sc->sc_dev,
   1981 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1982 					    512 << bytecnt, 512 << maxb);
   1983 					pcix_cmd = (pcix_cmd &
   1984 					    ~PCIX_CMD_BYTECNT_MASK) |
   1985 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1986 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1987 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1988 					    pcix_cmd);
   1989 				}
   1990 			}
   1991 		}
   1992 		/*
   1993 		 * The quad port adapter is special; it has a PCIX-PCIX
   1994 		 * bridge on the board, and can run the secondary bus at
   1995 		 * a higher speed.
   1996 		 */
   1997 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1998 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1999 								      : 66;
   2000 		} else if (sc->sc_flags & WM_F_PCIX) {
   2001 			switch (reg & STATUS_PCIXSPD_MASK) {
   2002 			case STATUS_PCIXSPD_50_66:
   2003 				sc->sc_bus_speed = 66;
   2004 				break;
   2005 			case STATUS_PCIXSPD_66_100:
   2006 				sc->sc_bus_speed = 100;
   2007 				break;
   2008 			case STATUS_PCIXSPD_100_133:
   2009 				sc->sc_bus_speed = 133;
   2010 				break;
   2011 			default:
   2012 				aprint_error_dev(sc->sc_dev,
   2013 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2014 				    reg & STATUS_PCIXSPD_MASK);
   2015 				sc->sc_bus_speed = 66;
   2016 				break;
   2017 			}
   2018 		} else
   2019 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2020 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2021 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2022 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2023 	}
   2024 
   2025 	/* clear interesting stat counters */
   2026 	CSR_READ(sc, WMREG_COLC);
   2027 	CSR_READ(sc, WMREG_RXERRC);
   2028 
   2029 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2030 	    || (sc->sc_type >= WM_T_ICH8))
   2031 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2032 	if (sc->sc_type >= WM_T_ICH8)
   2033 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2034 
    2035 	/* Determine NVM type/size and set up PHY/NVM locking per chip */
   2036 	switch (sc->sc_type) {
   2037 	case WM_T_82542_2_0:
   2038 	case WM_T_82542_2_1:
   2039 	case WM_T_82543:
   2040 	case WM_T_82544:
   2041 		/* Microwire */
   2042 		sc->sc_nvm_wordsize = 64;
   2043 		sc->sc_nvm_addrbits = 6;
   2044 		break;
   2045 	case WM_T_82540:
   2046 	case WM_T_82545:
   2047 	case WM_T_82545_3:
   2048 	case WM_T_82546:
   2049 	case WM_T_82546_3:
   2050 		/* Microwire */
   2051 		reg = CSR_READ(sc, WMREG_EECD);
   2052 		if (reg & EECD_EE_SIZE) {
   2053 			sc->sc_nvm_wordsize = 256;
   2054 			sc->sc_nvm_addrbits = 8;
   2055 		} else {
   2056 			sc->sc_nvm_wordsize = 64;
   2057 			sc->sc_nvm_addrbits = 6;
   2058 		}
   2059 		sc->sc_flags |= WM_F_LOCK_EECD;
   2060 		break;
   2061 	case WM_T_82541:
   2062 	case WM_T_82541_2:
   2063 	case WM_T_82547:
   2064 	case WM_T_82547_2:
   2065 		sc->sc_flags |= WM_F_LOCK_EECD;
   2066 		reg = CSR_READ(sc, WMREG_EECD);
   2067 		if (reg & EECD_EE_TYPE) {
   2068 			/* SPI */
   2069 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2070 			wm_nvm_set_addrbits_size_eecd(sc);
   2071 		} else {
   2072 			/* Microwire */
   2073 			if ((reg & EECD_EE_ABITS) != 0) {
   2074 				sc->sc_nvm_wordsize = 256;
   2075 				sc->sc_nvm_addrbits = 8;
   2076 			} else {
   2077 				sc->sc_nvm_wordsize = 64;
   2078 				sc->sc_nvm_addrbits = 6;
   2079 			}
   2080 		}
   2081 		break;
   2082 	case WM_T_82571:
   2083 	case WM_T_82572:
   2084 		/* SPI */
   2085 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2086 		wm_nvm_set_addrbits_size_eecd(sc);
   2087 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2088 		sc->phy.acquire = wm_get_swsm_semaphore;
   2089 		sc->phy.release = wm_put_swsm_semaphore;
   2090 		break;
   2091 	case WM_T_82573:
   2092 	case WM_T_82574:
   2093 	case WM_T_82583:
   2094 		if (sc->sc_type == WM_T_82573) {
   2095 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2096 			sc->phy.acquire = wm_get_swsm_semaphore;
   2097 			sc->phy.release = wm_put_swsm_semaphore;
   2098 		} else {
   2099 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2100 			/* Both PHY and NVM use the same semaphore. */
   2101 			sc->phy.acquire
   2102 			    = wm_get_swfwhw_semaphore;
   2103 			sc->phy.release
   2104 			    = wm_put_swfwhw_semaphore;
   2105 		}
   2106 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2107 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2108 			sc->sc_nvm_wordsize = 2048;
   2109 		} else {
   2110 			/* SPI */
   2111 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2112 			wm_nvm_set_addrbits_size_eecd(sc);
   2113 		}
   2114 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2115 		break;
   2116 	case WM_T_82575:
   2117 	case WM_T_82576:
   2118 	case WM_T_82580:
   2119 	case WM_T_I350:
   2120 	case WM_T_I354:
   2121 	case WM_T_80003:
   2122 		/* SPI */
   2123 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2124 		wm_nvm_set_addrbits_size_eecd(sc);
   2125 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2126 		    | WM_F_LOCK_SWSM;
   2127 		sc->phy.acquire = wm_get_phy_82575;
   2128 		sc->phy.release = wm_put_phy_82575;
   2129 		break;
   2130 	case WM_T_ICH8:
   2131 	case WM_T_ICH9:
   2132 	case WM_T_ICH10:
   2133 	case WM_T_PCH:
   2134 	case WM_T_PCH2:
   2135 	case WM_T_PCH_LPT:
   2136 		/* FLASH */
   2137 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2138 		sc->sc_nvm_wordsize = 2048;
   2139 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2140 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2141 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2142 			aprint_error_dev(sc->sc_dev,
   2143 			    "can't map FLASH registers\n");
   2144 			goto out;
   2145 		}
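		/*
		 * GFPREG describes the flash region in sector units; the
		 * computation below converts its base/limit fields into a
		 * per-bank size in 16-bit words, assuming two banks.
		 */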
   2146 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2147 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2148 		    ICH_FLASH_SECTOR_SIZE;
   2149 		sc->sc_ich8_flash_bank_size =
   2150 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2151 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2152 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2153 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2154 		sc->sc_flashreg_offset = 0;
   2155 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2156 		sc->phy.release = wm_put_swflag_ich8lan;
   2157 		break;
   2158 	case WM_T_PCH_SPT:
   2159 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2160 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2161 		sc->sc_flasht = sc->sc_st;
   2162 		sc->sc_flashh = sc->sc_sh;
   2163 		sc->sc_ich8_flash_base = 0;
   2164 		sc->sc_nvm_wordsize =
   2165 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2166 			* NVM_SIZE_MULTIPLIER;
    2167 		/* It is a size in bytes; we want it in words */
   2168 		sc->sc_nvm_wordsize /= 2;
   2169 		/* assume 2 banks */
   2170 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2171 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2172 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2173 		sc->phy.release = wm_put_swflag_ich8lan;
   2174 		break;
   2175 	case WM_T_I210:
   2176 	case WM_T_I211:
   2177 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2178 			wm_nvm_set_addrbits_size_eecd(sc);
   2179 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2180 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2181 		} else {
   2182 			sc->sc_nvm_wordsize = INVM_SIZE;
   2183 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2184 		}
   2185 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2186 		sc->phy.acquire = wm_get_phy_82575;
   2187 		sc->phy.release = wm_put_phy_82575;
   2188 		break;
   2189 	default:
   2190 		break;
   2191 	}
   2192 
   2193 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2194 	switch (sc->sc_type) {
   2195 	case WM_T_82571:
   2196 	case WM_T_82572:
   2197 		reg = CSR_READ(sc, WMREG_SWSM2);
   2198 		if ((reg & SWSM2_LOCK) == 0) {
   2199 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2200 			force_clear_smbi = true;
   2201 		} else
   2202 			force_clear_smbi = false;
   2203 		break;
   2204 	case WM_T_82573:
   2205 	case WM_T_82574:
   2206 	case WM_T_82583:
   2207 		force_clear_smbi = true;
   2208 		break;
   2209 	default:
   2210 		force_clear_smbi = false;
   2211 		break;
   2212 	}
   2213 	if (force_clear_smbi) {
   2214 		reg = CSR_READ(sc, WMREG_SWSM);
   2215 		if ((reg & SWSM_SMBI) != 0)
   2216 			aprint_error_dev(sc->sc_dev,
   2217 			    "Please update the Bootagent\n");
   2218 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2219 	}
   2220 
   2221 	/*
    2222 	 * Defer printing the EEPROM type until after verifying the checksum.
   2223 	 * This allows the EEPROM type to be printed correctly in the case
   2224 	 * that no EEPROM is attached.
   2225 	 */
   2226 	/*
   2227 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2228 	 * this for later, so we can fail future reads from the EEPROM.
   2229 	 */
   2230 	if (wm_nvm_validate_checksum(sc)) {
   2231 		/*
    2232 		 * Check the checksum again, because some PCIe parts fail
    2233 		 * the first check due to the link being in a sleep state.
   2234 		 */
   2235 		if (wm_nvm_validate_checksum(sc))
   2236 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2237 	}
   2238 
   2239 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2240 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2241 	else {
   2242 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2243 		    sc->sc_nvm_wordsize);
   2244 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2245 			aprint_verbose("iNVM");
   2246 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2247 			aprint_verbose("FLASH(HW)");
   2248 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2249 			aprint_verbose("FLASH");
   2250 		else {
   2251 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2252 				eetype = "SPI";
   2253 			else
   2254 				eetype = "MicroWire";
   2255 			aprint_verbose("(%d address bits) %s EEPROM",
   2256 			    sc->sc_nvm_addrbits, eetype);
   2257 		}
   2258 	}
   2259 	wm_nvm_version(sc);
   2260 	aprint_verbose("\n");
   2261 
   2262 	/*
    2263 	 * XXX This is the first call of wm_gmii_setup_phytype. Its result
    2264 	 * might be incorrect.
   2265 	 */
   2266 	wm_gmii_setup_phytype(sc, 0, 0);
   2267 
   2268 	/* Reset the chip to a known state. */
   2269 	wm_reset(sc);
   2270 
   2271 	/* Check for I21[01] PLL workaround */
   2272 	if (sc->sc_type == WM_T_I210)
   2273 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2274 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2275 		/* NVM image release 3.25 has a workaround */
   2276 		if ((sc->sc_nvm_ver_major < 3)
   2277 		    || ((sc->sc_nvm_ver_major == 3)
   2278 			&& (sc->sc_nvm_ver_minor < 25))) {
   2279 			aprint_verbose_dev(sc->sc_dev,
   2280 			    "ROM image version %d.%d is older than 3.25\n",
   2281 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2282 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2283 		}
   2284 	}
   2285 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2286 		wm_pll_workaround_i210(sc);
   2287 
   2288 	wm_get_wakeup(sc);
   2289 
   2290 	/* Non-AMT based hardware can now take control from firmware */
   2291 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2292 		wm_get_hw_control(sc);
   2293 
   2294 	/*
   2295 	 * Read the Ethernet address from the EEPROM, if not first found
   2296 	 * in device properties.
   2297 	 */
   2298 	ea = prop_dictionary_get(dict, "mac-address");
   2299 	if (ea != NULL) {
   2300 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2301 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2302 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2303 	} else {
   2304 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2305 			aprint_error_dev(sc->sc_dev,
   2306 			    "unable to read Ethernet address\n");
   2307 			goto out;
   2308 		}
   2309 	}
   2310 
   2311 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2312 	    ether_sprintf(enaddr));
   2313 
   2314 	/*
   2315 	 * Read the config info from the EEPROM, and set up various
   2316 	 * bits in the control registers based on their contents.
   2317 	 */
   2318 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2319 	if (pn != NULL) {
   2320 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2321 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2322 	} else {
   2323 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2324 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2325 			goto out;
   2326 		}
   2327 	}
   2328 
   2329 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2330 	if (pn != NULL) {
   2331 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2332 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2333 	} else {
   2334 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2335 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2336 			goto out;
   2337 		}
   2338 	}
   2339 
   2340 	/* check for WM_F_WOL */
   2341 	switch (sc->sc_type) {
   2342 	case WM_T_82542_2_0:
   2343 	case WM_T_82542_2_1:
   2344 	case WM_T_82543:
   2345 		/* dummy? */
   2346 		eeprom_data = 0;
   2347 		apme_mask = NVM_CFG3_APME;
   2348 		break;
   2349 	case WM_T_82544:
   2350 		apme_mask = NVM_CFG2_82544_APM_EN;
   2351 		eeprom_data = cfg2;
   2352 		break;
   2353 	case WM_T_82546:
   2354 	case WM_T_82546_3:
   2355 	case WM_T_82571:
   2356 	case WM_T_82572:
   2357 	case WM_T_82573:
   2358 	case WM_T_82574:
   2359 	case WM_T_82583:
   2360 	case WM_T_80003:
   2361 	default:
   2362 		apme_mask = NVM_CFG3_APME;
   2363 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2364 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2365 		break;
   2366 	case WM_T_82575:
   2367 	case WM_T_82576:
   2368 	case WM_T_82580:
   2369 	case WM_T_I350:
   2370 	case WM_T_I354: /* XXX ok? */
   2371 	case WM_T_ICH8:
   2372 	case WM_T_ICH9:
   2373 	case WM_T_ICH10:
   2374 	case WM_T_PCH:
   2375 	case WM_T_PCH2:
   2376 	case WM_T_PCH_LPT:
   2377 	case WM_T_PCH_SPT:
   2378 		/* XXX The funcid should be checked on some devices */
   2379 		apme_mask = WUC_APME;
   2380 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2381 		break;
   2382 	}
   2383 
   2384 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2385 	if ((eeprom_data & apme_mask) != 0)
   2386 		sc->sc_flags |= WM_F_WOL;
   2387 
   2388 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2389 		/* Check NVM for autonegotiation */
   2390 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2391 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2392 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2393 		}
   2394 	}
   2395 
   2396 	/*
    2397 	 * XXX need special handling for some multiple-port cards
    2398 	 * to disable a particular port.
   2399 	 */
   2400 
   2401 	if (sc->sc_type >= WM_T_82544) {
   2402 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2403 		if (pn != NULL) {
   2404 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2405 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2406 		} else {
   2407 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2408 				aprint_error_dev(sc->sc_dev,
   2409 				    "unable to read SWDPIN\n");
   2410 				goto out;
   2411 			}
   2412 		}
   2413 	}
   2414 
   2415 	if (cfg1 & NVM_CFG1_ILOS)
   2416 		sc->sc_ctrl |= CTRL_ILOS;
   2417 
   2418 	/*
   2419 	 * XXX
    2420 	 * This code isn't correct because pins 2 and 3 are located
    2421 	 * in different positions on newer chips. Check all datasheets.
    2422 	 *
    2423 	 * Until this problem is resolved, only do this on chips <= 82580.
   2424 	 */
   2425 	if (sc->sc_type <= WM_T_82580) {
   2426 		if (sc->sc_type >= WM_T_82544) {
   2427 			sc->sc_ctrl |=
   2428 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2429 			    CTRL_SWDPIO_SHIFT;
   2430 			sc->sc_ctrl |=
   2431 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2432 			    CTRL_SWDPINS_SHIFT;
   2433 		} else {
   2434 			sc->sc_ctrl |=
   2435 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2436 			    CTRL_SWDPIO_SHIFT;
   2437 		}
   2438 	}
   2439 
   2440 	/* XXX For other than 82580? */
   2441 	if (sc->sc_type == WM_T_82580) {
   2442 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2443 		if (nvmword & __BIT(13))
   2444 			sc->sc_ctrl |= CTRL_ILOS;
   2445 	}
   2446 
   2447 #if 0
   2448 	if (sc->sc_type >= WM_T_82544) {
   2449 		if (cfg1 & NVM_CFG1_IPS0)
   2450 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2451 		if (cfg1 & NVM_CFG1_IPS1)
   2452 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2453 		sc->sc_ctrl_ext |=
   2454 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2455 		    CTRL_EXT_SWDPIO_SHIFT;
   2456 		sc->sc_ctrl_ext |=
   2457 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2458 		    CTRL_EXT_SWDPINS_SHIFT;
   2459 	} else {
   2460 		sc->sc_ctrl_ext |=
   2461 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2462 		    CTRL_EXT_SWDPIO_SHIFT;
   2463 	}
   2464 #endif
   2465 
   2466 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2467 #if 0
   2468 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2469 #endif
   2470 
   2471 	if (sc->sc_type == WM_T_PCH) {
   2472 		uint16_t val;
   2473 
   2474 		/* Save the NVM K1 bit setting */
   2475 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2476 
   2477 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2478 			sc->sc_nvm_k1_enabled = 1;
   2479 		else
   2480 			sc->sc_nvm_k1_enabled = 0;
   2481 	}
   2482 
   2483 	/*
   2484 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
   2485 	 * media structures accordingly.
   2486 	 */
   2487 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2488 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2489 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2490 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2491 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2492 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2493 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2494 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2495 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2496 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2497 	    || (sc->sc_type == WM_T_I211)) {
   2498 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2499 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
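		/*
		 * The link mode field in CTRL_EXT selects the media:
		 * 1000KX means SERDES, GMII means copper, and the SGMII
		 * and PCIe-SERDES modes consult the SFP module before
		 * settling on copper or SERDES.
		 */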
   2500 		switch (link_mode) {
   2501 		case CTRL_EXT_LINK_MODE_1000KX:
   2502 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2503 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2504 			break;
   2505 		case CTRL_EXT_LINK_MODE_SGMII:
   2506 			if (wm_sgmii_uses_mdio(sc)) {
   2507 				aprint_verbose_dev(sc->sc_dev,
   2508 				    "SGMII(MDIO)\n");
   2509 				sc->sc_flags |= WM_F_SGMII;
   2510 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2511 				break;
   2512 			}
   2513 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2514 			/*FALLTHROUGH*/
   2515 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2516 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2517 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2518 				if (link_mode
   2519 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2520 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2521 					sc->sc_flags |= WM_F_SGMII;
   2522 				} else {
   2523 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2524 					aprint_verbose_dev(sc->sc_dev,
   2525 					    "SERDES\n");
   2526 				}
   2527 				break;
   2528 			}
   2529 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2530 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2531 
   2532 			/* Change current link mode setting */
   2533 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2534 			switch (sc->sc_mediatype) {
   2535 			case WM_MEDIATYPE_COPPER:
   2536 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2537 				break;
   2538 			case WM_MEDIATYPE_SERDES:
   2539 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2540 				break;
   2541 			default:
   2542 				break;
   2543 			}
   2544 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2545 			break;
   2546 		case CTRL_EXT_LINK_MODE_GMII:
   2547 		default:
   2548 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2549 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2550 			break;
   2551 		}
   2552 
    2553 		/* Enable the I2C interface only when SGMII uses it */
   2554 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2555 			reg |= CTRL_EXT_I2C_ENA;
   2556 		else
   2557 			reg &= ~CTRL_EXT_I2C_ENA;
   2558 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2559 
   2560 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2561 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2562 		else
   2563 			wm_tbi_mediainit(sc);
   2564 	} else if (sc->sc_type < WM_T_82543 ||
   2565 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2566 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2567 			aprint_error_dev(sc->sc_dev,
   2568 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2569 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2570 		}
   2571 		wm_tbi_mediainit(sc);
   2572 	} else {
   2573 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2574 			aprint_error_dev(sc->sc_dev,
   2575 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2576 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2577 		}
   2578 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2579 	}
   2580 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2581 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2582 
   2583 	/* Set device properties (macflags) */
   2584 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2585 
   2586 	ifp = &sc->sc_ethercom.ec_if;
   2587 	xname = device_xname(sc->sc_dev);
   2588 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2589 	ifp->if_softc = sc;
   2590 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2591 #ifdef WM_MPSAFE
   2592 	ifp->if_extflags = IFEF_START_MPSAFE;
   2593 #endif
   2594 	ifp->if_ioctl = wm_ioctl;
   2595 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2596 		ifp->if_start = wm_nq_start;
   2597 		/*
   2598 		 * When the number of CPUs is one and the controller can use
    2599 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2600 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2601 		 * the other for link status changes.
    2602 		 * In this situation, wm_nq_transmit() is disadvantageous
    2603 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2604 		 */
   2605 		if (wm_is_using_multiqueue(sc))
   2606 			ifp->if_transmit = wm_nq_transmit;
   2607 	} else {
   2608 		ifp->if_start = wm_start;
   2609 		/*
    2610 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2611 		 */
   2612 		if (wm_is_using_multiqueue(sc))
   2613 			ifp->if_transmit = wm_transmit;
   2614 	}
   2615 	ifp->if_watchdog = wm_watchdog;
   2616 	ifp->if_init = wm_init;
   2617 	ifp->if_stop = wm_stop;
   2618 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2619 	IFQ_SET_READY(&ifp->if_snd);
   2620 
   2621 	/* Check for jumbo frame */
   2622 	switch (sc->sc_type) {
   2623 	case WM_T_82573:
   2624 		/* XXX limited to 9234 if ASPM is disabled */
   2625 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2626 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2627 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2628 		break;
   2629 	case WM_T_82571:
   2630 	case WM_T_82572:
   2631 	case WM_T_82574:
   2632 	case WM_T_82575:
   2633 	case WM_T_82576:
   2634 	case WM_T_82580:
   2635 	case WM_T_I350:
   2636 	case WM_T_I354: /* XXXX ok? */
   2637 	case WM_T_I210:
   2638 	case WM_T_I211:
   2639 	case WM_T_80003:
   2640 	case WM_T_ICH9:
   2641 	case WM_T_ICH10:
   2642 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2643 	case WM_T_PCH_LPT:
   2644 	case WM_T_PCH_SPT:
   2645 		/* XXX limited to 9234 */
   2646 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2647 		break;
   2648 	case WM_T_PCH:
   2649 		/* XXX limited to 4096 */
   2650 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2651 		break;
   2652 	case WM_T_82542_2_0:
   2653 	case WM_T_82542_2_1:
   2654 	case WM_T_82583:
   2655 	case WM_T_ICH8:
   2656 		/* No support for jumbo frame */
   2657 		break;
   2658 	default:
   2659 		/* ETHER_MAX_LEN_JUMBO */
   2660 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2661 		break;
   2662 	}
   2663 
   2664 	/* If we're a i82543 or greater, we can support VLANs. */
   2665 	if (sc->sc_type >= WM_T_82543)
   2666 		sc->sc_ethercom.ec_capabilities |=
   2667 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2668 
   2669 	/*
    2670 	 * We can perform TCPv4 and UDPv4 checksums in hardware.  Only
   2671 	 * on i82543 and later.
   2672 	 */
   2673 	if (sc->sc_type >= WM_T_82543) {
   2674 		ifp->if_capabilities |=
   2675 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2676 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2677 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2678 		    IFCAP_CSUM_TCPv6_Tx |
   2679 		    IFCAP_CSUM_UDPv6_Tx;
   2680 	}
   2681 
   2682 	/*
   2683 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2684 	 *
   2685 	 *	82541GI (8086:1076) ... no
   2686 	 *	82572EI (8086:10b9) ... yes
   2687 	 */
   2688 	if (sc->sc_type >= WM_T_82571) {
   2689 		ifp->if_capabilities |=
   2690 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2691 	}
   2692 
   2693 	/*
   2694 	 * If we're a i82544 or greater (except i82547), we can do
   2695 	 * TCP segmentation offload.
   2696 	 */
   2697 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2698 		ifp->if_capabilities |= IFCAP_TSOv4;
   2699 	}
   2700 
   2701 	if (sc->sc_type >= WM_T_82571) {
   2702 		ifp->if_capabilities |= IFCAP_TSOv6;
   2703 	}
   2704 
   2705 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2706 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2707 
   2708 #ifdef WM_MPSAFE
   2709 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2710 #else
   2711 	sc->sc_core_lock = NULL;
   2712 #endif
   2713 
   2714 	/* Attach the interface. */
   2715 	if_initialize(ifp);
   2716 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2717 	ether_ifattach(ifp, enaddr);
   2718 	if_register(ifp);
   2719 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2720 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2721 			  RND_FLAG_DEFAULT);
   2722 
   2723 #ifdef WM_EVENT_COUNTERS
   2724 	/* Attach event counters. */
   2725 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2726 	    NULL, xname, "linkintr");
   2727 
   2728 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2729 	    NULL, xname, "tx_xoff");
   2730 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2731 	    NULL, xname, "tx_xon");
   2732 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2733 	    NULL, xname, "rx_xoff");
   2734 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2735 	    NULL, xname, "rx_xon");
   2736 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2737 	    NULL, xname, "rx_macctl");
   2738 #endif /* WM_EVENT_COUNTERS */
   2739 
   2740 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2741 		pmf_class_network_register(self, ifp);
   2742 	else
   2743 		aprint_error_dev(self, "couldn't establish power handler\n");
   2744 
   2745 	sc->sc_flags |= WM_F_ATTACHED;
   2746  out:
   2747 	return;
   2748 }
   2749 
   2750 /* The detach function (ca_detach) */
   2751 static int
   2752 wm_detach(device_t self, int flags __unused)
   2753 {
   2754 	struct wm_softc *sc = device_private(self);
   2755 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2756 	int i;
   2757 
   2758 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2759 		return 0;
   2760 
    2761 	/* Stop the interface.  The callouts are stopped in wm_stop(). */
   2762 	wm_stop(ifp, 1);
   2763 
   2764 	pmf_device_deregister(self);
   2765 
   2766 #ifdef WM_EVENT_COUNTERS
   2767 	evcnt_detach(&sc->sc_ev_linkintr);
   2768 
   2769 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2770 	evcnt_detach(&sc->sc_ev_tx_xon);
   2771 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2772 	evcnt_detach(&sc->sc_ev_rx_xon);
   2773 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2774 #endif /* WM_EVENT_COUNTERS */
   2775 
   2776 	/* Tell the firmware about the release */
   2777 	WM_CORE_LOCK(sc);
   2778 	wm_release_manageability(sc);
   2779 	wm_release_hw_control(sc);
   2780 	wm_enable_wakeup(sc);
   2781 	WM_CORE_UNLOCK(sc);
   2782 
   2783 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2784 
   2785 	/* Delete all remaining media. */
   2786 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2787 
   2788 	ether_ifdetach(ifp);
   2789 	if_detach(ifp);
   2790 	if_percpuq_destroy(sc->sc_ipq);
   2791 
   2792 	/* Unload RX dmamaps and free mbufs */
   2793 	for (i = 0; i < sc->sc_nqueues; i++) {
   2794 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2795 		mutex_enter(rxq->rxq_lock);
   2796 		wm_rxdrain(rxq);
   2797 		mutex_exit(rxq->rxq_lock);
   2798 	}
   2799 	/* Must unlock here */
   2800 
   2801 	/* Disestablish the interrupt handler */
   2802 	for (i = 0; i < sc->sc_nintrs; i++) {
   2803 		if (sc->sc_ihs[i] != NULL) {
   2804 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2805 			sc->sc_ihs[i] = NULL;
   2806 		}
   2807 	}
   2808 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2809 
   2810 	wm_free_txrx_queues(sc);
   2811 
   2812 	/* Unmap the registers */
   2813 	if (sc->sc_ss) {
   2814 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2815 		sc->sc_ss = 0;
   2816 	}
   2817 	if (sc->sc_ios) {
   2818 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2819 		sc->sc_ios = 0;
   2820 	}
   2821 	if (sc->sc_flashs) {
   2822 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2823 		sc->sc_flashs = 0;
   2824 	}
   2825 
   2826 	if (sc->sc_core_lock)
   2827 		mutex_obj_free(sc->sc_core_lock);
   2828 	if (sc->sc_ich_phymtx)
   2829 		mutex_obj_free(sc->sc_ich_phymtx);
   2830 	if (sc->sc_ich_nvmmtx)
   2831 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2832 
   2833 	return 0;
   2834 }
   2835 
   2836 static bool
   2837 wm_suspend(device_t self, const pmf_qual_t *qual)
   2838 {
   2839 	struct wm_softc *sc = device_private(self);
   2840 
   2841 	wm_release_manageability(sc);
   2842 	wm_release_hw_control(sc);
   2843 	wm_enable_wakeup(sc);
   2844 
   2845 	return true;
   2846 }
   2847 
   2848 static bool
   2849 wm_resume(device_t self, const pmf_qual_t *qual)
   2850 {
   2851 	struct wm_softc *sc = device_private(self);
   2852 
   2853 	wm_init_manageability(sc);
   2854 
   2855 	return true;
   2856 }
   2857 
   2858 /*
   2859  * wm_watchdog:		[ifnet interface function]
   2860  *
   2861  *	Watchdog timer handler.
   2862  */
   2863 static void
   2864 wm_watchdog(struct ifnet *ifp)
   2865 {
   2866 	int qid;
   2867 	struct wm_softc *sc = ifp->if_softc;
   2868 
   2869 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2870 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2871 
   2872 		wm_watchdog_txq(ifp, txq);
   2873 	}
   2874 
   2875 	/* Reset the interface. */
   2876 	(void) wm_init(ifp);
   2877 
   2878 	/*
2879 	 * Some upper-layer processing still calls ifp->if_start()
2880 	 * directly, e.g. ALTQ or single-CPU systems.
   2881 	 */
   2882 	/* Try to get more packets going. */
   2883 	ifp->if_start(ifp);
   2884 }
   2885 
   2886 static void
   2887 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2888 {
   2889 	struct wm_softc *sc = ifp->if_softc;
   2890 
   2891 	/*
   2892 	 * Since we're using delayed interrupts, sweep up
   2893 	 * before we report an error.
   2894 	 */
   2895 	mutex_enter(txq->txq_lock);
   2896 	wm_txeof(sc, txq);
   2897 	mutex_exit(txq->txq_lock);
   2898 
   2899 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2900 #ifdef WM_DEBUG
   2901 		int i, j;
   2902 		struct wm_txsoft *txs;
   2903 #endif
   2904 		log(LOG_ERR,
   2905 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2906 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2907 		    txq->txq_next);
   2908 		ifp->if_oerrors++;
   2909 #ifdef WM_DEBUG
2910 		for (i = txq->txq_sdirty; i != txq->txq_snext;
2911 		    i = WM_NEXTTXS(txq, i)) {
2912 			txs = &txq->txq_soft[i];
2913 			printf("txs %d tx %d -> %d\n",
2914 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
2915 			for (j = txs->txs_firstdesc; ;
2916 			    j = WM_NEXTTX(txq, j)) {
2917 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2918 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2919 				printf("\t %#08x%08x\n",
2920 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2921 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2922 				if (j == txs->txs_lastdesc)
2923 					break;
2924 			}
2925 		}
   2926 #endif
   2927 	}
   2928 }
   2929 
   2930 /*
   2931  * wm_tick:
   2932  *
   2933  *	One second timer, used to check link status, sweep up
   2934  *	completed transmit jobs, etc.
   2935  */
   2936 static void
   2937 wm_tick(void *arg)
   2938 {
   2939 	struct wm_softc *sc = arg;
   2940 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2941 #ifndef WM_MPSAFE
   2942 	int s = splnet();
   2943 #endif
   2944 
   2945 	WM_CORE_LOCK(sc);
   2946 
   2947 	if (sc->sc_core_stopping)
   2948 		goto out;
   2949 
   2950 	if (sc->sc_type >= WM_T_82542_2_1) {
   2951 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2952 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2953 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2954 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2955 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2956 	}
   2957 
   2958 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2959 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2960 	    + CSR_READ(sc, WMREG_CRCERRS)
   2961 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2962 	    + CSR_READ(sc, WMREG_SYMERRC)
   2963 	    + CSR_READ(sc, WMREG_RXERRC)
   2964 	    + CSR_READ(sc, WMREG_SEC)
   2965 	    + CSR_READ(sc, WMREG_CEXTERR)
   2966 	    + CSR_READ(sc, WMREG_RLEC);
   2967 	/*
2968 	 * WMREG_RNBC is incremented when no receive buffers are available
2969 	 * in host memory. It is not a count of dropped packets: the
2970 	 * ethernet controller can still receive packets in that case as
2971 	 * long as there is space in the PHY's FIFO.
2972 	 *
2973 	 * To track WMREG_RNBC, use a dedicated EVCNT instead of
2974 	 * if_iqdrops; see the sketch below.
   2975 	 */
   2976 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
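	/*
	 * A minimal sketch of the dedicated RNBC counter suggested above,
	 * assuming a hypothetical sc_ev_rnbc member (not part of this
	 * driver); it would be attached alongside the other counters in
	 * wm_attach() and accumulated once per tick here:
	 */
#if 0
	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rx_no_buffers");
	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
#endif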
   2977 
   2978 	if (sc->sc_flags & WM_F_HAS_MII)
   2979 		mii_tick(&sc->sc_mii);
   2980 	else if ((sc->sc_type >= WM_T_82575)
   2981 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2982 		wm_serdes_tick(sc);
   2983 	else
   2984 		wm_tbi_tick(sc);
   2985 
   2986 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2987 out:
   2988 	WM_CORE_UNLOCK(sc);
   2989 #ifndef WM_MPSAFE
   2990 	splx(s);
   2991 #endif
   2992 }
   2993 
   2994 static int
   2995 wm_ifflags_cb(struct ethercom *ec)
   2996 {
   2997 	struct ifnet *ifp = &ec->ec_if;
   2998 	struct wm_softc *sc = ifp->if_softc;
   2999 	int rc = 0;
   3000 
   3001 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3002 		device_xname(sc->sc_dev), __func__));
   3003 
   3004 	WM_CORE_LOCK(sc);
   3005 
   3006 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3007 	sc->sc_if_flags = ifp->if_flags;
   3008 
   3009 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3010 		rc = ENETRESET;
   3011 		goto out;
   3012 	}
   3013 
   3014 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3015 		wm_set_filter(sc);
   3016 
   3017 	wm_set_vlan(sc);
   3018 
   3019 out:
   3020 	WM_CORE_UNLOCK(sc);
   3021 
   3022 	return rc;
   3023 }
   3024 
   3025 /*
   3026  * wm_ioctl:		[ifnet interface function]
   3027  *
   3028  *	Handle control requests from the operator.
   3029  */
   3030 static int
   3031 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3032 {
   3033 	struct wm_softc *sc = ifp->if_softc;
   3034 	struct ifreq *ifr = (struct ifreq *) data;
   3035 	struct ifaddr *ifa = (struct ifaddr *)data;
   3036 	struct sockaddr_dl *sdl;
   3037 	int s, error;
   3038 
   3039 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3040 		device_xname(sc->sc_dev), __func__));
   3041 
   3042 #ifndef WM_MPSAFE
   3043 	s = splnet();
   3044 #endif
   3045 	switch (cmd) {
   3046 	case SIOCSIFMEDIA:
   3047 	case SIOCGIFMEDIA:
   3048 		WM_CORE_LOCK(sc);
   3049 		/* Flow control requires full-duplex mode. */
   3050 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3051 		    (ifr->ifr_media & IFM_FDX) == 0)
   3052 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3053 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3054 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3055 				/* We can do both TXPAUSE and RXPAUSE. */
   3056 				ifr->ifr_media |=
   3057 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3058 			}
   3059 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3060 		}
   3061 		WM_CORE_UNLOCK(sc);
   3062 #ifdef WM_MPSAFE
   3063 		s = splnet();
   3064 #endif
   3065 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3066 #ifdef WM_MPSAFE
   3067 		splx(s);
   3068 #endif
   3069 		break;
   3070 	case SIOCINITIFADDR:
   3071 		WM_CORE_LOCK(sc);
   3072 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3073 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3074 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3075 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3076 			/* unicast address is first multicast entry */
   3077 			wm_set_filter(sc);
   3078 			error = 0;
   3079 			WM_CORE_UNLOCK(sc);
   3080 			break;
   3081 		}
   3082 		WM_CORE_UNLOCK(sc);
   3083 		/*FALLTHROUGH*/
   3084 	default:
   3085 #ifdef WM_MPSAFE
   3086 		s = splnet();
   3087 #endif
   3088 		/* It may call wm_start, so unlock here */
   3089 		error = ether_ioctl(ifp, cmd, data);
   3090 #ifdef WM_MPSAFE
   3091 		splx(s);
   3092 #endif
   3093 		if (error != ENETRESET)
   3094 			break;
   3095 
   3096 		error = 0;
   3097 
   3098 		if (cmd == SIOCSIFCAP) {
   3099 			error = (*ifp->if_init)(ifp);
   3100 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3101 			;
   3102 		else if (ifp->if_flags & IFF_RUNNING) {
   3103 			/*
   3104 			 * Multicast list has changed; set the hardware filter
   3105 			 * accordingly.
   3106 			 */
   3107 			WM_CORE_LOCK(sc);
   3108 			wm_set_filter(sc);
   3109 			WM_CORE_UNLOCK(sc);
   3110 		}
   3111 		break;
   3112 	}
   3113 
   3114 #ifndef WM_MPSAFE
   3115 	splx(s);
   3116 #endif
   3117 	return error;
   3118 }
   3119 
   3120 /* MAC address related */
   3121 
   3122 /*
3123  * Get the offset of the MAC address and return it.
3124  * If an error occurs, return offset 0.
   3125  */
   3126 static uint16_t
   3127 wm_check_alt_mac_addr(struct wm_softc *sc)
   3128 {
   3129 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3130 	uint16_t offset = NVM_OFF_MACADDR;
   3131 
   3132 	/* Try to read alternative MAC address pointer */
   3133 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3134 		return 0;
   3135 
3136 	/* Check whether the pointer is valid. */
   3137 	if ((offset == 0x0000) || (offset == 0xffff))
   3138 		return 0;
   3139 
   3140 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3141 	/*
3142 	 * Check whether the alternative MAC address is valid. Some cards
3143 	 * have a non-0xffff pointer but don't actually use an alternative
3144 	 * MAC address.
3145 	 *
3146 	 * The test below requires the multicast (broadcast) bit to be clear.
   3147 	 */
   3148 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3149 		if (((myea[0] & 0xff) & 0x01) == 0)
   3150 			return offset; /* Found */
   3151 
   3152 	/* Not found */
   3153 	return 0;
   3154 }
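
/*
 * Example of the lookup above, with assumed NVM contents: if the word at
 * NVM_OFF_ALT_MAC_ADDR_PTR reads 0x0037 on PCI function 1, the alternative
 * MAC address is fetched from word offset 0x0037 +
 * NVM_OFF_MACADDR_82571(1) and is accepted only when the first octet's
 * multicast bit (myea[0] & 0x01) is clear.
 */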
   3155 
   3156 static int
   3157 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3158 {
   3159 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3160 	uint16_t offset = NVM_OFF_MACADDR;
   3161 	int do_invert = 0;
   3162 
   3163 	switch (sc->sc_type) {
   3164 	case WM_T_82580:
   3165 	case WM_T_I350:
   3166 	case WM_T_I354:
   3167 		/* EEPROM Top Level Partitioning */
   3168 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3169 		break;
   3170 	case WM_T_82571:
   3171 	case WM_T_82575:
   3172 	case WM_T_82576:
   3173 	case WM_T_80003:
   3174 	case WM_T_I210:
   3175 	case WM_T_I211:
   3176 		offset = wm_check_alt_mac_addr(sc);
   3177 		if (offset == 0)
   3178 			if ((sc->sc_funcid & 0x01) == 1)
   3179 				do_invert = 1;
   3180 		break;
   3181 	default:
   3182 		if ((sc->sc_funcid & 0x01) == 1)
   3183 			do_invert = 1;
   3184 		break;
   3185 	}
   3186 
   3187 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3188 		goto bad;
   3189 
   3190 	enaddr[0] = myea[0] & 0xff;
   3191 	enaddr[1] = myea[0] >> 8;
   3192 	enaddr[2] = myea[1] & 0xff;
   3193 	enaddr[3] = myea[1] >> 8;
   3194 	enaddr[4] = myea[2] & 0xff;
   3195 	enaddr[5] = myea[2] >> 8;
   3196 
   3197 	/*
   3198 	 * Toggle the LSB of the MAC address on the second port
   3199 	 * of some dual port cards.
   3200 	 */
   3201 	if (do_invert != 0)
   3202 		enaddr[5] ^= 1;
   3203 
   3204 	return 0;
   3205 
   3206  bad:
   3207 	return -1;
   3208 }
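
/*
 * Worked example of the unpacking above: the NVM stores the address as
 * three little-endian words, so myea[] = { 0x1100, 0x3322, 0x5544 }
 * yields enaddr[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, i.e. the
 * station address 00:11:22:33:44:55.
 */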
   3209 
   3210 /*
   3211  * wm_set_ral:
   3212  *
3213  *	Set an entry in the receive address list.
   3214  */
   3215 static void
   3216 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3217 {
   3218 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3219 	uint32_t wlock_mac;
   3220 	int rv;
   3221 
   3222 	if (enaddr != NULL) {
   3223 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3224 		    (enaddr[3] << 24);
   3225 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3226 		ral_hi |= RAL_AV;
   3227 	} else {
   3228 		ral_lo = 0;
   3229 		ral_hi = 0;
   3230 	}
   3231 
   3232 	switch (sc->sc_type) {
   3233 	case WM_T_82542_2_0:
   3234 	case WM_T_82542_2_1:
   3235 	case WM_T_82543:
   3236 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3237 		CSR_WRITE_FLUSH(sc);
   3238 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3239 		CSR_WRITE_FLUSH(sc);
   3240 		break;
   3241 	case WM_T_PCH2:
   3242 	case WM_T_PCH_LPT:
   3243 	case WM_T_PCH_SPT:
   3244 		if (idx == 0) {
   3245 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3246 			CSR_WRITE_FLUSH(sc);
   3247 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3248 			CSR_WRITE_FLUSH(sc);
   3249 			return;
   3250 		}
   3251 		if (sc->sc_type != WM_T_PCH2) {
   3252 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3253 			    FWSM_WLOCK_MAC);
   3254 			addrl = WMREG_SHRAL(idx - 1);
   3255 			addrh = WMREG_SHRAH(idx - 1);
   3256 		} else {
   3257 			wlock_mac = 0;
   3258 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3259 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3260 		}
   3261 
   3262 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3263 			rv = wm_get_swflag_ich8lan(sc);
   3264 			if (rv != 0)
   3265 				return;
   3266 			CSR_WRITE(sc, addrl, ral_lo);
   3267 			CSR_WRITE_FLUSH(sc);
   3268 			CSR_WRITE(sc, addrh, ral_hi);
   3269 			CSR_WRITE_FLUSH(sc);
   3270 			wm_put_swflag_ich8lan(sc);
   3271 		}
   3272 
   3273 		break;
   3274 	default:
   3275 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3276 		CSR_WRITE_FLUSH(sc);
   3277 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3278 		CSR_WRITE_FLUSH(sc);
   3279 		break;
   3280 	}
   3281 }
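
/*
 * Example of the RAL/RAH packing above: for the station address
 * 00:11:22:33:44:55, ral_lo = 0x33221100 and ral_hi = 0x5544 | RAL_AV,
 * i.e. the first four octets go into the low register and the last two,
 * plus the Address Valid bit, into the high register.
 */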
   3282 
   3283 /*
   3284  * wm_mchash:
   3285  *
   3286  *	Compute the hash of the multicast address for the 4096-bit
   3287  *	multicast filter.
   3288  */
   3289 static uint32_t
   3290 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3291 {
   3292 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3293 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3294 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3295 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3296 	uint32_t hash;
   3297 
   3298 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3299 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3300 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3301 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3302 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3303 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3304 		return (hash & 0x3ff);
   3305 	}
   3306 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3307 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3308 
   3309 	return (hash & 0xfff);
   3310 }
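
/*
 * Worked example, assuming sc_mchash_type is 0 (the common case) on a
 * non-ICH/PCH chip: for the IPv4 multicast address 01:00:5e:00:00:01,
 * enaddr[4] = 0x00 and enaddr[5] = 0x01, so hash = (0x00 >> 4) |
 * (0x01 << 4) = 0x010.  wm_set_filter() then splits the 12-bit hash into
 * a register index (hash >> 5 = 0) and a bit index (hash & 0x1f = 0x10)
 * within the 4096-bit multicast table.
 */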
   3311 
   3312 /*
   3313  * wm_set_filter:
   3314  *
   3315  *	Set up the receive filter.
   3316  */
   3317 static void
   3318 wm_set_filter(struct wm_softc *sc)
   3319 {
   3320 	struct ethercom *ec = &sc->sc_ethercom;
   3321 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3322 	struct ether_multi *enm;
   3323 	struct ether_multistep step;
   3324 	bus_addr_t mta_reg;
   3325 	uint32_t hash, reg, bit;
   3326 	int i, size, ralmax;
   3327 
   3328 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3329 		device_xname(sc->sc_dev), __func__));
   3330 
   3331 	if (sc->sc_type >= WM_T_82544)
   3332 		mta_reg = WMREG_CORDOVA_MTA;
   3333 	else
   3334 		mta_reg = WMREG_MTA;
   3335 
   3336 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3337 
   3338 	if (ifp->if_flags & IFF_BROADCAST)
   3339 		sc->sc_rctl |= RCTL_BAM;
   3340 	if (ifp->if_flags & IFF_PROMISC) {
   3341 		sc->sc_rctl |= RCTL_UPE;
   3342 		goto allmulti;
   3343 	}
   3344 
   3345 	/*
   3346 	 * Set the station address in the first RAL slot, and
   3347 	 * clear the remaining slots.
   3348 	 */
   3349 	if (sc->sc_type == WM_T_ICH8)
3350 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3351 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3352 	    || (sc->sc_type == WM_T_PCH))
   3353 		size = WM_RAL_TABSIZE_ICH8;
   3354 	else if (sc->sc_type == WM_T_PCH2)
   3355 		size = WM_RAL_TABSIZE_PCH2;
   3356 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3357 		size = WM_RAL_TABSIZE_PCH_LPT;
   3358 	else if (sc->sc_type == WM_T_82575)
   3359 		size = WM_RAL_TABSIZE_82575;
   3360 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3361 		size = WM_RAL_TABSIZE_82576;
   3362 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3363 		size = WM_RAL_TABSIZE_I350;
   3364 	else
   3365 		size = WM_RAL_TABSIZE;
   3366 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3367 
   3368 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3369 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3370 		switch (i) {
   3371 		case 0:
   3372 			/* We can use all entries */
   3373 			ralmax = size;
   3374 			break;
   3375 		case 1:
   3376 			/* Only RAR[0] */
   3377 			ralmax = 1;
   3378 			break;
   3379 		default:
   3380 			/* available SHRA + RAR[0] */
   3381 			ralmax = i + 1;
   3382 		}
   3383 	} else
   3384 		ralmax = size;
   3385 	for (i = 1; i < size; i++) {
   3386 		if (i < ralmax)
   3387 			wm_set_ral(sc, NULL, i);
   3388 	}
   3389 
   3390 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3391 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3392 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3393 	    || (sc->sc_type == WM_T_PCH_SPT))
   3394 		size = WM_ICH8_MC_TABSIZE;
   3395 	else
   3396 		size = WM_MC_TABSIZE;
   3397 	/* Clear out the multicast table. */
   3398 	for (i = 0; i < size; i++) {
   3399 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3400 		CSR_WRITE_FLUSH(sc);
   3401 	}
   3402 
   3403 	ETHER_LOCK(ec);
   3404 	ETHER_FIRST_MULTI(step, ec, enm);
   3405 	while (enm != NULL) {
   3406 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3407 			ETHER_UNLOCK(ec);
   3408 			/*
   3409 			 * We must listen to a range of multicast addresses.
   3410 			 * For now, just accept all multicasts, rather than
   3411 			 * trying to set only those filter bits needed to match
   3412 			 * the range.  (At this time, the only use of address
   3413 			 * ranges is for IP multicast routing, for which the
   3414 			 * range is big enough to require all bits set.)
   3415 			 */
   3416 			goto allmulti;
   3417 		}
   3418 
   3419 		hash = wm_mchash(sc, enm->enm_addrlo);
   3420 
   3421 		reg = (hash >> 5);
   3422 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3423 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3424 		    || (sc->sc_type == WM_T_PCH2)
   3425 		    || (sc->sc_type == WM_T_PCH_LPT)
   3426 		    || (sc->sc_type == WM_T_PCH_SPT))
   3427 			reg &= 0x1f;
   3428 		else
   3429 			reg &= 0x7f;
   3430 		bit = hash & 0x1f;
   3431 
   3432 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3433 		hash |= 1U << bit;
   3434 
   3435 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3436 			/*
3437 			 * 82544 Errata 9: Certain registers cannot be written
   3438 			 * with particular alignments in PCI-X bus operation
   3439 			 * (FCAH, MTA and VFTA).
   3440 			 */
   3441 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3442 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3443 			CSR_WRITE_FLUSH(sc);
   3444 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3445 			CSR_WRITE_FLUSH(sc);
   3446 		} else {
   3447 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3448 			CSR_WRITE_FLUSH(sc);
   3449 		}
   3450 
   3451 		ETHER_NEXT_MULTI(step, enm);
   3452 	}
   3453 	ETHER_UNLOCK(ec);
   3454 
   3455 	ifp->if_flags &= ~IFF_ALLMULTI;
   3456 	goto setit;
   3457 
   3458  allmulti:
   3459 	ifp->if_flags |= IFF_ALLMULTI;
   3460 	sc->sc_rctl |= RCTL_MPE;
   3461 
   3462  setit:
   3463 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3464 }
   3465 
   3466 /* Reset and init related */
   3467 
   3468 static void
   3469 wm_set_vlan(struct wm_softc *sc)
   3470 {
   3471 
   3472 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3473 		device_xname(sc->sc_dev), __func__));
   3474 
   3475 	/* Deal with VLAN enables. */
   3476 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3477 		sc->sc_ctrl |= CTRL_VME;
   3478 	else
   3479 		sc->sc_ctrl &= ~CTRL_VME;
   3480 
   3481 	/* Write the control registers. */
   3482 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3483 }
   3484 
   3485 static void
   3486 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3487 {
   3488 	uint32_t gcr;
   3489 	pcireg_t ctrl2;
   3490 
   3491 	gcr = CSR_READ(sc, WMREG_GCR);
   3492 
3493 	/* Only take action if the timeout value is at its default of 0 */
   3494 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3495 		goto out;
   3496 
   3497 	if ((gcr & GCR_CAP_VER2) == 0) {
   3498 		gcr |= GCR_CMPL_TMOUT_10MS;
   3499 		goto out;
   3500 	}
   3501 
   3502 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3503 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3504 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3505 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3506 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3507 
   3508 out:
   3509 	/* Disable completion timeout resend */
   3510 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3511 
   3512 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3513 }
   3514 
   3515 void
   3516 wm_get_auto_rd_done(struct wm_softc *sc)
   3517 {
   3518 	int i;
   3519 
3520 	/* Wait for eeprom to reload */
   3521 	switch (sc->sc_type) {
   3522 	case WM_T_82571:
   3523 	case WM_T_82572:
   3524 	case WM_T_82573:
   3525 	case WM_T_82574:
   3526 	case WM_T_82583:
   3527 	case WM_T_82575:
   3528 	case WM_T_82576:
   3529 	case WM_T_82580:
   3530 	case WM_T_I350:
   3531 	case WM_T_I354:
   3532 	case WM_T_I210:
   3533 	case WM_T_I211:
   3534 	case WM_T_80003:
   3535 	case WM_T_ICH8:
   3536 	case WM_T_ICH9:
   3537 		for (i = 0; i < 10; i++) {
   3538 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3539 				break;
   3540 			delay(1000);
   3541 		}
   3542 		if (i == 10) {
   3543 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3544 			    "complete\n", device_xname(sc->sc_dev));
   3545 		}
   3546 		break;
   3547 	default:
   3548 		break;
   3549 	}
   3550 }
   3551 
   3552 void
   3553 wm_lan_init_done(struct wm_softc *sc)
   3554 {
   3555 	uint32_t reg = 0;
   3556 	int i;
   3557 
   3558 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3559 		device_xname(sc->sc_dev), __func__));
   3560 
   3561 	/* Wait for eeprom to reload */
   3562 	switch (sc->sc_type) {
   3563 	case WM_T_ICH10:
   3564 	case WM_T_PCH:
   3565 	case WM_T_PCH2:
   3566 	case WM_T_PCH_LPT:
   3567 	case WM_T_PCH_SPT:
   3568 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3569 			reg = CSR_READ(sc, WMREG_STATUS);
   3570 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3571 				break;
   3572 			delay(100);
   3573 		}
   3574 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3575 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3576 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3577 		}
   3578 		break;
   3579 	default:
   3580 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3581 		    __func__);
   3582 		break;
   3583 	}
   3584 
   3585 	reg &= ~STATUS_LAN_INIT_DONE;
   3586 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3587 }
   3588 
   3589 void
   3590 wm_get_cfg_done(struct wm_softc *sc)
   3591 {
   3592 	int mask;
   3593 	uint32_t reg;
   3594 	int i;
   3595 
   3596 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3597 		device_xname(sc->sc_dev), __func__));
   3598 
   3599 	/* Wait for eeprom to reload */
   3600 	switch (sc->sc_type) {
   3601 	case WM_T_82542_2_0:
   3602 	case WM_T_82542_2_1:
   3603 		/* null */
   3604 		break;
   3605 	case WM_T_82543:
   3606 	case WM_T_82544:
   3607 	case WM_T_82540:
   3608 	case WM_T_82545:
   3609 	case WM_T_82545_3:
   3610 	case WM_T_82546:
   3611 	case WM_T_82546_3:
   3612 	case WM_T_82541:
   3613 	case WM_T_82541_2:
   3614 	case WM_T_82547:
   3615 	case WM_T_82547_2:
   3616 	case WM_T_82573:
   3617 	case WM_T_82574:
   3618 	case WM_T_82583:
   3619 		/* generic */
   3620 		delay(10*1000);
   3621 		break;
   3622 	case WM_T_80003:
   3623 	case WM_T_82571:
   3624 	case WM_T_82572:
   3625 	case WM_T_82575:
   3626 	case WM_T_82576:
   3627 	case WM_T_82580:
   3628 	case WM_T_I350:
   3629 	case WM_T_I354:
   3630 	case WM_T_I210:
   3631 	case WM_T_I211:
   3632 		if (sc->sc_type == WM_T_82571) {
   3633 			/* Only 82571 shares port 0 */
   3634 			mask = EEMNGCTL_CFGDONE_0;
   3635 		} else
   3636 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3637 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3638 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3639 				break;
   3640 			delay(1000);
   3641 		}
   3642 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3643 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3644 				device_xname(sc->sc_dev), __func__));
   3645 		}
   3646 		break;
   3647 	case WM_T_ICH8:
   3648 	case WM_T_ICH9:
   3649 	case WM_T_ICH10:
   3650 	case WM_T_PCH:
   3651 	case WM_T_PCH2:
   3652 	case WM_T_PCH_LPT:
   3653 	case WM_T_PCH_SPT:
   3654 		delay(10*1000);
   3655 		if (sc->sc_type >= WM_T_ICH10)
   3656 			wm_lan_init_done(sc);
   3657 		else
   3658 			wm_get_auto_rd_done(sc);
   3659 
   3660 		reg = CSR_READ(sc, WMREG_STATUS);
   3661 		if ((reg & STATUS_PHYRA) != 0)
   3662 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3663 		break;
   3664 	default:
   3665 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3666 		    __func__);
   3667 		break;
   3668 	}
   3669 }
   3670 
   3671 void
   3672 wm_phy_post_reset(struct wm_softc *sc)
   3673 {
   3674 	uint32_t reg;
   3675 
   3676 	/* This function is only for ICH8 and newer. */
   3677 	if (sc->sc_type < WM_T_ICH8)
   3678 		return;
   3679 
   3680 	if (wm_phy_resetisblocked(sc)) {
   3681 		/* XXX */
   3682 		device_printf(sc->sc_dev, " PHY is blocked\n");
   3683 		return;
   3684 	}
   3685 
   3686 	/* Allow time for h/w to get to quiescent state after reset */
   3687 	delay(10*1000);
   3688 
   3689 	/* Perform any necessary post-reset workarounds */
   3690 	if (sc->sc_type == WM_T_PCH)
   3691 		wm_hv_phy_workaround_ich8lan(sc);
   3692 	if (sc->sc_type == WM_T_PCH2)
   3693 		wm_lv_phy_workaround_ich8lan(sc);
   3694 
   3695 	/* Clear the host wakeup bit after lcd reset */
   3696 	if (sc->sc_type >= WM_T_PCH) {
   3697 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3698 		    BM_PORT_GEN_CFG);
   3699 		reg &= ~BM_WUC_HOST_WU_BIT;
   3700 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3701 		    BM_PORT_GEN_CFG, reg);
   3702 	}
   3703 
   3704 	/* Configure the LCD with the extended configuration region in NVM */
   3705 	wm_init_lcd_from_nvm(sc);
   3706 
   3707 	/* Configure the LCD with the OEM bits in NVM */
   3708 }
   3709 
   3710 /* Only for PCH and newer */
   3711 static void
   3712 wm_write_smbus_addr(struct wm_softc *sc)
   3713 {
   3714 	uint32_t strap, freq;
   3715 	uint32_t phy_data;
   3716 
   3717 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3718 		device_xname(sc->sc_dev), __func__));
   3719 
   3720 	strap = CSR_READ(sc, WMREG_STRAP);
   3721 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3722 
   3723 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3724 
   3725 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3726 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3727 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3728 
   3729 	if (sc->sc_phytype == WMPHY_I217) {
   3730 		/* Restore SMBus frequency */
3731 		if (freq--) {
   3732 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3733 			    | HV_SMB_ADDR_FREQ_HIGH);
   3734 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3735 			    HV_SMB_ADDR_FREQ_LOW);
   3736 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3737 			    HV_SMB_ADDR_FREQ_HIGH);
   3738 		} else {
   3739 			DPRINTF(WM_DEBUG_INIT,
   3740 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3741 				device_xname(sc->sc_dev), __func__));
   3742 		}
   3743 	}
   3744 
   3745 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3746 }
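
/*
 * Worked example of the frequency restore above: a STRAP_FREQ field of 2
 * tests true and becomes 1 after the post-decrement, so
 * HV_SMB_ADDR_FREQ_LOW is set and HV_SMB_ADDR_FREQ_HIGH is cleared; a
 * field of 0 takes the else branch, leaving both bits untouched.
 */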
   3747 
   3748 void
   3749 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3750 {
   3751 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3752 	uint16_t phy_page = 0;
   3753 
   3754 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3755 		device_xname(sc->sc_dev), __func__));
   3756 
   3757 	switch (sc->sc_type) {
   3758 	case WM_T_ICH8:
   3759 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3760 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3761 			return;
   3762 
   3763 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3764 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3765 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3766 			break;
   3767 		}
   3768 		/* FALLTHROUGH */
   3769 	case WM_T_PCH:
   3770 	case WM_T_PCH2:
   3771 	case WM_T_PCH_LPT:
   3772 	case WM_T_PCH_SPT:
   3773 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3774 		break;
   3775 	default:
   3776 		return;
   3777 	}
   3778 
   3779 	sc->phy.acquire(sc);
   3780 
   3781 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3782 	if ((reg & sw_cfg_mask) == 0)
   3783 		goto release;
   3784 
   3785 	/*
   3786 	 * Make sure HW does not configure LCD from PHY extended configuration
   3787 	 * before SW configuration
   3788 	 */
   3789 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3790 	if ((sc->sc_type < WM_T_PCH2)
   3791 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3792 		goto release;
   3793 
   3794 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3795 		device_xname(sc->sc_dev), __func__));
   3796 	/* word_addr is in DWORD */
   3797 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3798 
   3799 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3800 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3801 
   3802 	if (((sc->sc_type == WM_T_PCH)
   3803 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3804 	    || (sc->sc_type > WM_T_PCH)) {
   3805 		/*
   3806 		 * HW configures the SMBus address and LEDs when the OEM and
   3807 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3808 		 * are cleared, SW will configure them instead.
   3809 		 */
   3810 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3811 			device_xname(sc->sc_dev), __func__));
   3812 		wm_write_smbus_addr(sc);
   3813 
   3814 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3815 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3816 	}
   3817 
   3818 	/* Configure LCD from extended configuration region. */
   3819 	for (i = 0; i < cnf_size; i++) {
   3820 		uint16_t reg_data, reg_addr;
   3821 
   3822 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3823 			goto release;
   3824 
   3825 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3826 			goto release;
   3827 
   3828 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3829 			phy_page = reg_data;
   3830 
   3831 		reg_addr &= IGPHY_MAXREGADDR;
   3832 		reg_addr |= phy_page;
   3833 
   3834 		sc->phy.release(sc); /* XXX */
   3835 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3836 		sc->phy.acquire(sc); /* XXX */
   3837 	}
   3838 
   3839 release:
   3840 	sc->phy.release(sc);
   3841 	return;
   3842 }
   3843 
   3844 
   3845 /* Init hardware bits */
   3846 void
   3847 wm_initialize_hardware_bits(struct wm_softc *sc)
   3848 {
   3849 	uint32_t tarc0, tarc1, reg;
   3850 
   3851 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3852 		device_xname(sc->sc_dev), __func__));
   3853 
   3854 	/* For 82571 variant, 80003 and ICHs */
   3855 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3856 	    || (sc->sc_type >= WM_T_80003)) {
   3857 
   3858 		/* Transmit Descriptor Control 0 */
   3859 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3860 		reg |= TXDCTL_COUNT_DESC;
   3861 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3862 
   3863 		/* Transmit Descriptor Control 1 */
   3864 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3865 		reg |= TXDCTL_COUNT_DESC;
   3866 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3867 
   3868 		/* TARC0 */
   3869 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3870 		switch (sc->sc_type) {
   3871 		case WM_T_82571:
   3872 		case WM_T_82572:
   3873 		case WM_T_82573:
   3874 		case WM_T_82574:
   3875 		case WM_T_82583:
   3876 		case WM_T_80003:
   3877 			/* Clear bits 30..27 */
   3878 			tarc0 &= ~__BITS(30, 27);
   3879 			break;
   3880 		default:
   3881 			break;
   3882 		}
   3883 
   3884 		switch (sc->sc_type) {
   3885 		case WM_T_82571:
   3886 		case WM_T_82572:
   3887 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3888 
   3889 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3890 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3891 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3892 			/* 8257[12] Errata No.7 */
3893 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3894 
   3895 			/* TARC1 bit 28 */
   3896 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3897 				tarc1 &= ~__BIT(28);
   3898 			else
   3899 				tarc1 |= __BIT(28);
   3900 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3901 
   3902 			/*
   3903 			 * 8257[12] Errata No.13
3904 			 * Disable Dynamic Clock Gating.
   3905 			 */
   3906 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3907 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3908 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3909 			break;
   3910 		case WM_T_82573:
   3911 		case WM_T_82574:
   3912 		case WM_T_82583:
   3913 			if ((sc->sc_type == WM_T_82574)
   3914 			    || (sc->sc_type == WM_T_82583))
   3915 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3916 
   3917 			/* Extended Device Control */
   3918 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3919 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3920 			reg |= __BIT(22);	/* Set bit 22 */
   3921 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3922 
   3923 			/* Device Control */
   3924 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3925 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3926 
   3927 			/* PCIe Control Register */
   3928 			/*
   3929 			 * 82573 Errata (unknown).
   3930 			 *
   3931 			 * 82574 Errata 25 and 82583 Errata 12
   3932 			 * "Dropped Rx Packets":
3933 			 *   NVM Image Version 2.1.4 and newer doesn't have this bug.
   3934 			 */
   3935 			reg = CSR_READ(sc, WMREG_GCR);
   3936 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3937 			CSR_WRITE(sc, WMREG_GCR, reg);
   3938 
   3939 			if ((sc->sc_type == WM_T_82574)
   3940 			    || (sc->sc_type == WM_T_82583)) {
   3941 				/*
   3942 				 * Document says this bit must be set for
   3943 				 * proper operation.
   3944 				 */
   3945 				reg = CSR_READ(sc, WMREG_GCR);
   3946 				reg |= __BIT(22);
   3947 				CSR_WRITE(sc, WMREG_GCR, reg);
   3948 
   3949 				/*
3950 				 * Apply a workaround for the hardware
3951 				 * erratum documented in the errata docs.
3952 				 * It fixes an issue where some error-prone
3953 				 * or unreliable PCIe completions occur,
3954 				 * particularly with ASPM enabled. Without
3955 				 * the fix, the issue can cause Tx timeouts.
   3956 				 */
   3957 				reg = CSR_READ(sc, WMREG_GCR2);
   3958 				reg |= __BIT(0);
   3959 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3960 			}
   3961 			break;
   3962 		case WM_T_80003:
   3963 			/* TARC0 */
   3964 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3965 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3966 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3967 
   3968 			/* TARC1 bit 28 */
   3969 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3970 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3971 				tarc1 &= ~__BIT(28);
   3972 			else
   3973 				tarc1 |= __BIT(28);
   3974 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3975 			break;
   3976 		case WM_T_ICH8:
   3977 		case WM_T_ICH9:
   3978 		case WM_T_ICH10:
   3979 		case WM_T_PCH:
   3980 		case WM_T_PCH2:
   3981 		case WM_T_PCH_LPT:
   3982 		case WM_T_PCH_SPT:
   3983 			/* TARC0 */
   3984 			if ((sc->sc_type == WM_T_ICH8)
   3985 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3986 				/* Set TARC0 bits 29 and 28 */
   3987 				tarc0 |= __BITS(29, 28);
   3988 			}
   3989 			/* Set TARC0 bits 23,24,26,27 */
   3990 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3991 
   3992 			/* CTRL_EXT */
   3993 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3994 			reg |= __BIT(22);	/* Set bit 22 */
   3995 			/*
   3996 			 * Enable PHY low-power state when MAC is at D3
   3997 			 * w/o WoL
   3998 			 */
   3999 			if (sc->sc_type >= WM_T_PCH)
   4000 				reg |= CTRL_EXT_PHYPDEN;
   4001 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4002 
   4003 			/* TARC1 */
   4004 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4005 			/* bit 28 */
   4006 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4007 				tarc1 &= ~__BIT(28);
   4008 			else
   4009 				tarc1 |= __BIT(28);
   4010 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4011 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4012 
   4013 			/* Device Status */
   4014 			if (sc->sc_type == WM_T_ICH8) {
   4015 				reg = CSR_READ(sc, WMREG_STATUS);
   4016 				reg &= ~__BIT(31);
   4017 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4018 
   4019 			}
   4020 
   4021 			/* IOSFPC */
   4022 			if (sc->sc_type == WM_T_PCH_SPT) {
   4023 				reg = CSR_READ(sc, WMREG_IOSFPC);
4024 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4025 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4026 			}
   4027 			/*
4028 			 * Work around a descriptor data corruption issue
4029 			 * with NFSv2 UDP traffic by simply disabling the
4030 			 * NFS filtering capability.
   4031 			 */
   4032 			reg = CSR_READ(sc, WMREG_RFCTL);
   4033 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4034 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4035 			break;
   4036 		default:
   4037 			break;
   4038 		}
   4039 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4040 
   4041 		switch (sc->sc_type) {
   4042 		/*
4043 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others:
4044 		 * avoid the RSS Hash Value bug.
   4045 		 */
   4046 		case WM_T_82571:
   4047 		case WM_T_82572:
   4048 		case WM_T_82573:
   4049 		case WM_T_80003:
   4050 		case WM_T_ICH8:
   4051 			reg = CSR_READ(sc, WMREG_RFCTL);
   4052 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4053 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4054 			break;
   4055 		case WM_T_82574:
4056 			/* Use extended Rx descriptors. */
   4057 			reg = CSR_READ(sc, WMREG_RFCTL);
   4058 			reg |= WMREG_RFCTL_EXSTEN;
   4059 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4060 			break;
   4061 		default:
   4062 			break;
   4063 		}
   4064 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4065 		/*
   4066 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4067 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4068 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4069 		 * Correctly by the Device"
   4070 		 *
   4071 		 * I354(C2000) Errata AVR53:
   4072 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4073 		 * Hang"
   4074 		 */
   4075 		reg = CSR_READ(sc, WMREG_RFCTL);
   4076 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4077 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4078 	}
   4079 }
   4080 
   4081 static uint32_t
   4082 wm_rxpbs_adjust_82580(uint32_t val)
   4083 {
   4084 	uint32_t rv = 0;
   4085 
   4086 	if (val < __arraycount(wm_82580_rxpbs_table))
   4087 		rv = wm_82580_rxpbs_table[val];
   4088 
   4089 	return rv;
   4090 }
   4091 
   4092 /*
   4093  * wm_reset_phy:
   4094  *
   4095  *	generic PHY reset function.
   4096  *	Same as e1000_phy_hw_reset_generic()
   4097  */
   4098 static void
   4099 wm_reset_phy(struct wm_softc *sc)
   4100 {
   4101 	uint32_t reg;
   4102 
   4103 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4104 		device_xname(sc->sc_dev), __func__));
   4105 	if (wm_phy_resetisblocked(sc))
   4106 		return;
   4107 
   4108 	sc->phy.acquire(sc);
   4109 
   4110 	reg = CSR_READ(sc, WMREG_CTRL);
   4111 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4112 	CSR_WRITE_FLUSH(sc);
   4113 
   4114 	delay(sc->phy.reset_delay_us);
   4115 
   4116 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4117 	CSR_WRITE_FLUSH(sc);
   4118 
   4119 	delay(150);
   4120 
   4121 	sc->phy.release(sc);
   4122 
   4123 	wm_get_cfg_done(sc);
   4124 	wm_phy_post_reset(sc);
   4125 }
   4126 
   4127 static void
   4128 wm_flush_desc_rings(struct wm_softc *sc)
   4129 {
   4130 	pcireg_t preg;
   4131 	uint32_t reg;
   4132 	struct wm_txqueue *txq;
   4133 	wiseman_txdesc_t *txd;
   4134 	int nexttx;
   4135 	uint32_t rctl;
   4136 
   4137 	/* First, disable MULR fix in FEXTNVM11 */
   4138 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4139 	reg |= FEXTNVM11_DIS_MULRFIX;
   4140 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4141 
   4142 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4143 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4144 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4145 		return;
   4146 
   4147 	/* TX */
   4148 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4149 	    device_xname(sc->sc_dev), preg, reg);
   4150 	reg = CSR_READ(sc, WMREG_TCTL);
   4151 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4152 
   4153 	txq = &sc->sc_queue[0].wmq_txq;
   4154 	nexttx = txq->txq_next;
   4155 	txd = &txq->txq_descs[nexttx];
   4156 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4157 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4158 	txd->wtx_fields.wtxu_status = 0;
   4159 	txd->wtx_fields.wtxu_options = 0;
   4160 	txd->wtx_fields.wtxu_vlan = 0;
   4161 
   4162 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4163 	    BUS_SPACE_BARRIER_WRITE);
   4164 
   4165 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4166 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4167 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4168 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4169 	delay(250);
   4170 
   4171 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4172 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4173 		return;
   4174 
   4175 	/* RX */
   4176 	printf("%s: Need RX flush (reg = %08x)\n",
   4177 	    device_xname(sc->sc_dev), preg);
   4178 	rctl = CSR_READ(sc, WMREG_RCTL);
   4179 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4180 	CSR_WRITE_FLUSH(sc);
   4181 	delay(150);
   4182 
   4183 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4184 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4185 	reg &= 0xffffc000;
   4186 	/*
   4187 	 * update thresholds: prefetch threshold to 31, host threshold
   4188 	 * to 1 and make sure the granularity is "descriptors" and not
   4189 	 * "cache lines"
   4190 	 */
   4191 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4192 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
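	/*
	 * Decoding the value written above: the low bits (0x1f) set a
	 * prefetch threshold of 31, (1 << 8) sets a host threshold of 1,
	 * and RXDCTL_GRAN selects descriptor rather than cache-line
	 * granularity.
	 */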
   4193 
   4194 	/*
   4195 	 * momentarily enable the RX ring for the changes to take
   4196 	 * effect
   4197 	 */
   4198 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4199 	CSR_WRITE_FLUSH(sc);
   4200 	delay(150);
   4201 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4202 }
   4203 
   4204 /*
   4205  * wm_reset:
   4206  *
   4207  *	Reset the i82542 chip.
   4208  */
   4209 static void
   4210 wm_reset(struct wm_softc *sc)
   4211 {
   4212 	int phy_reset = 0;
   4213 	int i, error = 0;
   4214 	uint32_t reg;
   4215 
   4216 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4217 		device_xname(sc->sc_dev), __func__));
   4218 	KASSERT(sc->sc_type != 0);
   4219 
   4220 	/*
   4221 	 * Allocate on-chip memory according to the MTU size.
   4222 	 * The Packet Buffer Allocation register must be written
   4223 	 * before the chip is reset.
   4224 	 */
   4225 	switch (sc->sc_type) {
   4226 	case WM_T_82547:
   4227 	case WM_T_82547_2:
   4228 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4229 		    PBA_22K : PBA_30K;
   4230 		for (i = 0; i < sc->sc_nqueues; i++) {
   4231 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4232 			txq->txq_fifo_head = 0;
   4233 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4234 			txq->txq_fifo_size =
   4235 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4236 			txq->txq_fifo_stall = 0;
   4237 		}
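		/*
		 * Example: with the default MTU, sc_pba is PBA_30K, so the
		 * Tx FIFO spans (PBA_40K - PBA_30K) = 10KB starting at the
		 * 30KB mark; with jumbo frames the Rx buffer shrinks to
		 * 22KB and the Tx FIFO grows to 18KB.
		 */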
   4238 		break;
   4239 	case WM_T_82571:
   4240 	case WM_T_82572:
4241 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4242 	case WM_T_80003:
   4243 		sc->sc_pba = PBA_32K;
   4244 		break;
   4245 	case WM_T_82573:
   4246 		sc->sc_pba = PBA_12K;
   4247 		break;
   4248 	case WM_T_82574:
   4249 	case WM_T_82583:
   4250 		sc->sc_pba = PBA_20K;
   4251 		break;
   4252 	case WM_T_82576:
   4253 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4254 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4255 		break;
   4256 	case WM_T_82580:
   4257 	case WM_T_I350:
   4258 	case WM_T_I354:
   4259 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4260 		break;
   4261 	case WM_T_I210:
   4262 	case WM_T_I211:
   4263 		sc->sc_pba = PBA_34K;
   4264 		break;
   4265 	case WM_T_ICH8:
   4266 		/* Workaround for a bit corruption issue in FIFO memory */
   4267 		sc->sc_pba = PBA_8K;
   4268 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4269 		break;
   4270 	case WM_T_ICH9:
   4271 	case WM_T_ICH10:
   4272 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4273 		    PBA_14K : PBA_10K;
   4274 		break;
   4275 	case WM_T_PCH:
   4276 	case WM_T_PCH2:
   4277 	case WM_T_PCH_LPT:
   4278 	case WM_T_PCH_SPT:
   4279 		sc->sc_pba = PBA_26K;
   4280 		break;
   4281 	default:
   4282 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4283 		    PBA_40K : PBA_48K;
   4284 		break;
   4285 	}
   4286 	/*
   4287 	 * Only old or non-multiqueue devices have the PBA register
   4288 	 * XXX Need special handling for 82575.
   4289 	 */
   4290 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4291 	    || (sc->sc_type == WM_T_82575))
   4292 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4293 
   4294 	/* Prevent the PCI-E bus from sticking */
   4295 	if (sc->sc_flags & WM_F_PCIE) {
   4296 		int timeout = 800;
   4297 
   4298 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4299 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4300 
   4301 		while (timeout--) {
   4302 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4303 			    == 0)
   4304 				break;
   4305 			delay(100);
   4306 		}
   4307 		if (timeout == 0)
   4308 			device_printf(sc->sc_dev,
   4309 			    "failed to disable busmastering\n");
   4310 	}
   4311 
   4312 	/* Set the completion timeout for interface */
   4313 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4314 	    || (sc->sc_type == WM_T_82580)
   4315 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4316 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4317 		wm_set_pcie_completion_timeout(sc);
   4318 
   4319 	/* Clear interrupt */
   4320 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4321 	if (wm_is_using_msix(sc)) {
   4322 		if (sc->sc_type != WM_T_82574) {
   4323 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4324 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4325 		} else {
   4326 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4327 		}
   4328 	}
   4329 
   4330 	/* Stop the transmit and receive processes. */
   4331 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4332 	sc->sc_rctl &= ~RCTL_EN;
   4333 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4334 	CSR_WRITE_FLUSH(sc);
   4335 
   4336 	/* XXX set_tbi_sbp_82543() */
   4337 
   4338 	delay(10*1000);
   4339 
   4340 	/* Must acquire the MDIO ownership before MAC reset */
   4341 	switch (sc->sc_type) {
   4342 	case WM_T_82573:
   4343 	case WM_T_82574:
   4344 	case WM_T_82583:
   4345 		error = wm_get_hw_semaphore_82573(sc);
   4346 		break;
   4347 	default:
   4348 		break;
   4349 	}
   4350 
   4351 	/*
   4352 	 * 82541 Errata 29? & 82547 Errata 28?
4353 	 * See also the description of the PHY_RST bit in the CTRL register
   4354 	 * in 8254x_GBe_SDM.pdf.
   4355 	 */
   4356 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4357 		CSR_WRITE(sc, WMREG_CTRL,
   4358 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4359 		CSR_WRITE_FLUSH(sc);
   4360 		delay(5000);
   4361 	}
   4362 
   4363 	switch (sc->sc_type) {
   4364 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4365 	case WM_T_82541:
   4366 	case WM_T_82541_2:
   4367 	case WM_T_82547:
   4368 	case WM_T_82547_2:
   4369 		/*
   4370 		 * On some chipsets, a reset through a memory-mapped write
   4371 		 * cycle can cause the chip to reset before completing the
   4372 		 * write cycle.  This causes major headache that can be
4373 		 * write cycle.  This causes major headaches that can be
   4374 		 * through I/O space.
   4375 		 *
   4376 		 * So, if we successfully mapped the I/O BAR at attach time,
   4377 		 * use that.  Otherwise, try our luck with a memory-mapped
   4378 		 * reset.
   4379 		 */
   4380 		if (sc->sc_flags & WM_F_IOH_VALID)
   4381 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4382 		else
   4383 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4384 		break;
   4385 	case WM_T_82545_3:
   4386 	case WM_T_82546_3:
   4387 		/* Use the shadow control register on these chips. */
   4388 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4389 		break;
   4390 	case WM_T_80003:
   4391 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4392 		sc->phy.acquire(sc);
   4393 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4394 		sc->phy.release(sc);
   4395 		break;
   4396 	case WM_T_ICH8:
   4397 	case WM_T_ICH9:
   4398 	case WM_T_ICH10:
   4399 	case WM_T_PCH:
   4400 	case WM_T_PCH2:
   4401 	case WM_T_PCH_LPT:
   4402 	case WM_T_PCH_SPT:
   4403 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4404 		if (wm_phy_resetisblocked(sc) == false) {
   4405 			/*
   4406 			 * Gate automatic PHY configuration by hardware on
   4407 			 * non-managed 82579
   4408 			 */
   4409 			if ((sc->sc_type == WM_T_PCH2)
   4410 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4411 				== 0))
   4412 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4413 
   4414 			reg |= CTRL_PHY_RESET;
   4415 			phy_reset = 1;
   4416 		} else
   4417 			printf("XXX reset is blocked!!!\n");
   4418 		sc->phy.acquire(sc);
   4419 		CSR_WRITE(sc, WMREG_CTRL, reg);
4420 		/* Don't insert a completion barrier during reset */
   4421 		delay(20*1000);
   4422 		mutex_exit(sc->sc_ich_phymtx);
   4423 		break;
   4424 	case WM_T_82580:
   4425 	case WM_T_I350:
   4426 	case WM_T_I354:
   4427 	case WM_T_I210:
   4428 	case WM_T_I211:
   4429 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4430 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4431 			CSR_WRITE_FLUSH(sc);
   4432 		delay(5000);
   4433 		break;
   4434 	case WM_T_82542_2_0:
   4435 	case WM_T_82542_2_1:
   4436 	case WM_T_82543:
   4437 	case WM_T_82540:
   4438 	case WM_T_82545:
   4439 	case WM_T_82546:
   4440 	case WM_T_82571:
   4441 	case WM_T_82572:
   4442 	case WM_T_82573:
   4443 	case WM_T_82574:
   4444 	case WM_T_82575:
   4445 	case WM_T_82576:
   4446 	case WM_T_82583:
   4447 	default:
   4448 		/* Everything else can safely use the documented method. */
   4449 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4450 		break;
   4451 	}
   4452 
   4453 	/* Must release the MDIO ownership after MAC reset */
   4454 	switch (sc->sc_type) {
   4455 	case WM_T_82573:
   4456 	case WM_T_82574:
   4457 	case WM_T_82583:
   4458 		if (error == 0)
   4459 			wm_put_hw_semaphore_82573(sc);
   4460 		break;
   4461 	default:
   4462 		break;
   4463 	}
   4464 
   4465 	if (phy_reset != 0)
   4466 		wm_get_cfg_done(sc);
   4467 
   4468 	/* reload EEPROM */
   4469 	switch (sc->sc_type) {
   4470 	case WM_T_82542_2_0:
   4471 	case WM_T_82542_2_1:
   4472 	case WM_T_82543:
   4473 	case WM_T_82544:
   4474 		delay(10);
   4475 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4476 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4477 		CSR_WRITE_FLUSH(sc);
   4478 		delay(2000);
   4479 		break;
   4480 	case WM_T_82540:
   4481 	case WM_T_82545:
   4482 	case WM_T_82545_3:
   4483 	case WM_T_82546:
   4484 	case WM_T_82546_3:
   4485 		delay(5*1000);
   4486 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4487 		break;
   4488 	case WM_T_82541:
   4489 	case WM_T_82541_2:
   4490 	case WM_T_82547:
   4491 	case WM_T_82547_2:
   4492 		delay(20000);
   4493 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4494 		break;
   4495 	case WM_T_82571:
   4496 	case WM_T_82572:
   4497 	case WM_T_82573:
   4498 	case WM_T_82574:
   4499 	case WM_T_82583:
   4500 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4501 			delay(10);
   4502 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4503 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4504 			CSR_WRITE_FLUSH(sc);
   4505 		}
   4506 		/* check EECD_EE_AUTORD */
   4507 		wm_get_auto_rd_done(sc);
   4508 		/*
4509 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
   4510 		 * is set.
   4511 		 */
   4512 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4513 		    || (sc->sc_type == WM_T_82583))
   4514 			delay(25*1000);
   4515 		break;
   4516 	case WM_T_82575:
   4517 	case WM_T_82576:
   4518 	case WM_T_82580:
   4519 	case WM_T_I350:
   4520 	case WM_T_I354:
   4521 	case WM_T_I210:
   4522 	case WM_T_I211:
   4523 	case WM_T_80003:
   4524 		/* check EECD_EE_AUTORD */
   4525 		wm_get_auto_rd_done(sc);
   4526 		break;
   4527 	case WM_T_ICH8:
   4528 	case WM_T_ICH9:
   4529 	case WM_T_ICH10:
   4530 	case WM_T_PCH:
   4531 	case WM_T_PCH2:
   4532 	case WM_T_PCH_LPT:
   4533 	case WM_T_PCH_SPT:
   4534 		break;
   4535 	default:
   4536 		panic("%s: unknown type\n", __func__);
   4537 	}
   4538 
   4539 	/* Check whether EEPROM is present or not */
   4540 	switch (sc->sc_type) {
   4541 	case WM_T_82575:
   4542 	case WM_T_82576:
   4543 	case WM_T_82580:
   4544 	case WM_T_I350:
   4545 	case WM_T_I354:
   4546 	case WM_T_ICH8:
   4547 	case WM_T_ICH9:
   4548 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4549 			/* Not found */
   4550 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4551 			if (sc->sc_type == WM_T_82575)
   4552 				wm_reset_init_script_82575(sc);
   4553 		}
   4554 		break;
   4555 	default:
   4556 		break;
   4557 	}
   4558 
   4559 	if (phy_reset != 0)
   4560 		wm_phy_post_reset(sc);
   4561 
   4562 	if ((sc->sc_type == WM_T_82580)
   4563 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4564 		/* clear global device reset status bit */
   4565 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4566 	}
   4567 
   4568 	/* Clear any pending interrupt events. */
   4569 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4570 	reg = CSR_READ(sc, WMREG_ICR);
   4571 	if (wm_is_using_msix(sc)) {
   4572 		if (sc->sc_type != WM_T_82574) {
   4573 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4574 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4575 		} else
   4576 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4577 	}
   4578 
   4579 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4580 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4581 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4582 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4583 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4584 		reg |= KABGTXD_BGSQLBIAS;
   4585 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4586 	}
   4587 
   4588 	/* reload sc_ctrl */
   4589 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4590 
   4591 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4592 		wm_set_eee_i350(sc);
   4593 
   4594 	/*
   4595 	 * For PCH, this write will make sure that any noise will be detected
   4596 	 * as a CRC error and be dropped rather than show up as a bad packet
    4597 	 * to the DMA engine.
   4598 	 */
   4599 	if (sc->sc_type == WM_T_PCH)
   4600 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4601 
   4602 	if (sc->sc_type >= WM_T_82544)
   4603 		CSR_WRITE(sc, WMREG_WUC, 0);
   4604 
   4605 	wm_reset_mdicnfg_82580(sc);
   4606 
   4607 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4608 		wm_pll_workaround_i210(sc);
   4609 }
   4610 
   4611 /*
   4612  * wm_add_rxbuf:
   4613  *
    4614  *	Add a receive buffer to the indicated descriptor.
   4615  */
   4616 static int
   4617 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4618 {
   4619 	struct wm_softc *sc = rxq->rxq_sc;
   4620 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4621 	struct mbuf *m;
   4622 	int error;
   4623 
   4624 	KASSERT(mutex_owned(rxq->rxq_lock));
   4625 
   4626 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4627 	if (m == NULL)
   4628 		return ENOBUFS;
   4629 
   4630 	MCLGET(m, M_DONTWAIT);
   4631 	if ((m->m_flags & M_EXT) == 0) {
   4632 		m_freem(m);
   4633 		return ENOBUFS;
   4634 	}
   4635 
   4636 	if (rxs->rxs_mbuf != NULL)
   4637 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4638 
   4639 	rxs->rxs_mbuf = m;
   4640 
   4641 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4642 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4643 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4644 	if (error) {
   4645 		/* XXX XXX XXX */
   4646 		aprint_error_dev(sc->sc_dev,
   4647 		    "unable to load rx DMA map %d, error = %d\n",
   4648 		    idx, error);
   4649 		panic("wm_add_rxbuf");
   4650 	}
   4651 
   4652 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4653 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4654 
   4655 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4656 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4657 			wm_init_rxdesc(rxq, idx);
   4658 	} else
   4659 		wm_init_rxdesc(rxq, idx);
   4660 
   4661 	return 0;
   4662 }
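
         /*
          * Note on the conditional wm_init_rxdesc() above: on NEWQUEUE
          * devices the descriptor is (re)initialized only while the
          * receiver is enabled; compare the "On 575 and later set RDT
          * only if RX enabled" handling in wm_init_locked() below.
          */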
   4663 
   4664 /*
   4665  * wm_rxdrain:
   4666  *
   4667  *	Drain the receive queue.
   4668  */
   4669 static void
   4670 wm_rxdrain(struct wm_rxqueue *rxq)
   4671 {
   4672 	struct wm_softc *sc = rxq->rxq_sc;
   4673 	struct wm_rxsoft *rxs;
   4674 	int i;
   4675 
   4676 	KASSERT(mutex_owned(rxq->rxq_lock));
   4677 
   4678 	for (i = 0; i < WM_NRXDESC; i++) {
   4679 		rxs = &rxq->rxq_soft[i];
   4680 		if (rxs->rxs_mbuf != NULL) {
   4681 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4682 			m_freem(rxs->rxs_mbuf);
   4683 			rxs->rxs_mbuf = NULL;
   4684 		}
   4685 	}
   4686 }
   4687 
   4688 
   4689 /*
   4690  * XXX copy from FreeBSD's sys/net/rss_config.c
   4691  */
   4692 /*
   4693  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4694  * effectiveness may be limited by algorithm choice and available entropy
   4695  * during the boot.
   4696  *
   4697  * XXXRW: And that we don't randomize it yet!
   4698  *
   4699  * This is the default Microsoft RSS specification key which is also
   4700  * the Chelsio T5 firmware default key.
   4701  */
   4702 #define RSS_KEYSIZE 40
   4703 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4704 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4705 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4706 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4707 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4708 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4709 };
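
         /*
          * For reference: RSS_KEYSIZE is 40 bytes, which is exactly
          * RSSRK_NUM_REGS (10) 32-bit RSSRK register writes; the
          * CTASSERT in wm_init_rss() below checks this relationship.
          */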
   4710 
   4711 /*
   4712  * Caller must pass an array of size sizeof(rss_key).
   4713  *
   4714  * XXX
    4715  * Since if_ixgbe may also use this function, it should not be
    4716  * an if_wm specific function.
   4717  */
   4718 static void
   4719 wm_rss_getkey(uint8_t *key)
   4720 {
   4721 
   4722 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4723 }
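
         /*
          * Illustrative usage, mirroring wm_init_rss() below: the caller
          * provides sizeof(wm_rss_key) bytes, typically viewed as 32-bit
          * words for the RSSRK registers:
          *
          *	uint32_t rss_key[RSSRK_NUM_REGS];
          *	int i;
          *
          *	wm_rss_getkey((uint8_t *)rss_key);
          *	for (i = 0; i < RSSRK_NUM_REGS; i++)
          *		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
          */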
   4724 
   4725 /*
   4726  * Setup registers for RSS.
   4727  *
    4728  * XXX VMDq is not yet supported.
   4729  */
   4730 static void
   4731 wm_init_rss(struct wm_softc *sc)
   4732 {
   4733 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4734 	int i;
   4735 
   4736 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4737 
   4738 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4739 		int qid, reta_ent;
   4740 
   4741 		qid  = i % sc->sc_nqueues;
    4742 		switch (sc->sc_type) {
   4743 		case WM_T_82574:
   4744 			reta_ent = __SHIFTIN(qid,
   4745 			    RETA_ENT_QINDEX_MASK_82574);
   4746 			break;
   4747 		case WM_T_82575:
   4748 			reta_ent = __SHIFTIN(qid,
   4749 			    RETA_ENT_QINDEX1_MASK_82575);
   4750 			break;
   4751 		default:
   4752 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4753 			break;
   4754 		}
   4755 
   4756 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4757 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4758 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4759 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4760 	}
   4761 
   4762 	wm_rss_getkey((uint8_t *)rss_key);
   4763 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4764 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4765 
   4766 	if (sc->sc_type == WM_T_82574)
   4767 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4768 	else
   4769 		mrqc = MRQC_ENABLE_RSS_MQ;
   4770 
   4771 	/*
   4772 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4773 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4774 	 */
   4775 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4776 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4777 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4778 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4779 
   4780 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4781 }
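
         /*
          * RETA illustration: since qid = i % sc->sc_nqueues, with
          * sc_nqueues == 4 the redirection table entries cycle
          * entry 0 -> queue 0, entry 1 -> queue 1, ..., entry 4 -> queue 0,
          * spreading the hash space evenly over the queues.
          */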
   4782 
   4783 /*
    4784  * Adjust the numbers of TX and RX queues which the system actually uses.
    4785  *
    4786  * The numbers are affected by the following parameters:
    4787  *     - The number of hardware queues
   4788  *     - The number of MSI-X vectors (= "nvectors" argument)
   4789  *     - ncpu
   4790  */
   4791 static void
   4792 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4793 {
   4794 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4795 
   4796 	if (nvectors < 2) {
   4797 		sc->sc_nqueues = 1;
   4798 		return;
   4799 	}
   4800 
    4801 	switch (sc->sc_type) {
   4802 	case WM_T_82572:
   4803 		hw_ntxqueues = 2;
   4804 		hw_nrxqueues = 2;
   4805 		break;
   4806 	case WM_T_82574:
   4807 		hw_ntxqueues = 2;
   4808 		hw_nrxqueues = 2;
   4809 		break;
   4810 	case WM_T_82575:
   4811 		hw_ntxqueues = 4;
   4812 		hw_nrxqueues = 4;
   4813 		break;
   4814 	case WM_T_82576:
   4815 		hw_ntxqueues = 16;
   4816 		hw_nrxqueues = 16;
   4817 		break;
   4818 	case WM_T_82580:
   4819 	case WM_T_I350:
   4820 	case WM_T_I354:
   4821 		hw_ntxqueues = 8;
   4822 		hw_nrxqueues = 8;
   4823 		break;
   4824 	case WM_T_I210:
   4825 		hw_ntxqueues = 4;
   4826 		hw_nrxqueues = 4;
   4827 		break;
   4828 	case WM_T_I211:
   4829 		hw_ntxqueues = 2;
   4830 		hw_nrxqueues = 2;
   4831 		break;
   4832 		/*
    4833 		 * As the Ethernet controllers below do not support MSI-X,
    4834 		 * this driver does not use multiqueue for them:
   4835 		 *     - WM_T_80003
   4836 		 *     - WM_T_ICH8
   4837 		 *     - WM_T_ICH9
   4838 		 *     - WM_T_ICH10
   4839 		 *     - WM_T_PCH
   4840 		 *     - WM_T_PCH2
   4841 		 *     - WM_T_PCH_LPT
   4842 		 */
   4843 	default:
   4844 		hw_ntxqueues = 1;
   4845 		hw_nrxqueues = 1;
   4846 		break;
   4847 	}
   4848 
   4849 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4850 
   4851 	/*
    4852 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    4853 	 * limit the number of queues actually used.
   4854 	 */
   4855 	if (nvectors < hw_nqueues + 1) {
   4856 		sc->sc_nqueues = nvectors - 1;
   4857 	} else {
   4858 		sc->sc_nqueues = hw_nqueues;
   4859 	}
   4860 
   4861 	/*
    4862 	 * Since more queues than CPUs cannot improve scaling, we limit
    4863 	 * the number of queues actually used.
   4864 	 */
   4865 	if (ncpu < sc->sc_nqueues)
   4866 		sc->sc_nqueues = ncpu;
   4867 }
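
         /*
          * Worked example of the clamping above: an 82576 (16 hardware
          * TX/RX queues) attached with nvectors == 5 on an 8-CPU machine
          * is first clamped to nvectors - 1 == 4 queues (one vector is
          * reserved for the link interrupt), and ncpu == 8 does not clamp
          * further, so sc_nqueues becomes 4.
          */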
   4868 
   4869 static inline bool
   4870 wm_is_using_msix(struct wm_softc *sc)
   4871 {
   4872 
   4873 	return (sc->sc_nintrs > 1);
   4874 }
   4875 
   4876 static inline bool
   4877 wm_is_using_multiqueue(struct wm_softc *sc)
   4878 {
   4879 
   4880 	return (sc->sc_nqueues > 1);
   4881 }
   4882 
   4883 static int
   4884 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4885 {
   4886 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4887 	wmq->wmq_id = qidx;
   4888 	wmq->wmq_intr_idx = intr_idx;
   4889 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4890 #ifdef WM_MPSAFE
   4891 	    | SOFTINT_MPSAFE
   4892 #endif
   4893 	    , wm_handle_queue, wmq);
   4894 	if (wmq->wmq_si != NULL)
   4895 		return 0;
   4896 
   4897 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4898 	    wmq->wmq_id);
   4899 
   4900 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4901 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4902 	return ENOMEM;
   4903 }
   4904 
   4905 /*
    4906  * Both single-interrupt MSI and INTx can use this function.
   4907  */
   4908 static int
   4909 wm_setup_legacy(struct wm_softc *sc)
   4910 {
   4911 	pci_chipset_tag_t pc = sc->sc_pc;
   4912 	const char *intrstr = NULL;
   4913 	char intrbuf[PCI_INTRSTR_LEN];
   4914 	int error;
   4915 
   4916 	error = wm_alloc_txrx_queues(sc);
   4917 	if (error) {
   4918 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4919 		    error);
   4920 		return ENOMEM;
   4921 	}
   4922 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4923 	    sizeof(intrbuf));
   4924 #ifdef WM_MPSAFE
   4925 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4926 #endif
   4927 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4928 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4929 	if (sc->sc_ihs[0] == NULL) {
   4930 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4931 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4932 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4933 		return ENOMEM;
   4934 	}
   4935 
   4936 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4937 	sc->sc_nintrs = 1;
   4938 
   4939 	return wm_softint_establish(sc, 0, 0);
   4940 }
   4941 
   4942 static int
   4943 wm_setup_msix(struct wm_softc *sc)
   4944 {
   4945 	void *vih;
   4946 	kcpuset_t *affinity;
   4947 	int qidx, error, intr_idx, txrx_established;
   4948 	pci_chipset_tag_t pc = sc->sc_pc;
   4949 	const char *intrstr = NULL;
   4950 	char intrbuf[PCI_INTRSTR_LEN];
   4951 	char intr_xname[INTRDEVNAMEBUF];
   4952 
   4953 	if (sc->sc_nqueues < ncpu) {
   4954 		/*
   4955 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4956 		 * interrupts starts from CPU#1.
   4957 		 */
   4958 		sc->sc_affinity_offset = 1;
   4959 	} else {
   4960 		/*
    4961 		 * In this case, this device uses all CPUs, so we unify the
    4962 		 * affinity cpu_index with the MSI-X vector number for readability.
   4963 		 */
   4964 		sc->sc_affinity_offset = 0;
   4965 	}
   4966 
   4967 	error = wm_alloc_txrx_queues(sc);
   4968 	if (error) {
   4969 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4970 		    error);
   4971 		return ENOMEM;
   4972 	}
   4973 
   4974 	kcpuset_create(&affinity, false);
   4975 	intr_idx = 0;
   4976 
   4977 	/*
   4978 	 * TX and RX
   4979 	 */
   4980 	txrx_established = 0;
   4981 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4982 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4983 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4984 
   4985 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4986 		    sizeof(intrbuf));
   4987 #ifdef WM_MPSAFE
   4988 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4989 		    PCI_INTR_MPSAFE, true);
   4990 #endif
   4991 		memset(intr_xname, 0, sizeof(intr_xname));
   4992 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4993 		    device_xname(sc->sc_dev), qidx);
   4994 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4995 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4996 		if (vih == NULL) {
   4997 			aprint_error_dev(sc->sc_dev,
   4998 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4999 			    intrstr ? " at " : "",
   5000 			    intrstr ? intrstr : "");
   5001 
   5002 			goto fail;
   5003 		}
   5004 		kcpuset_zero(affinity);
   5005 		/* Round-robin affinity */
   5006 		kcpuset_set(affinity, affinity_to);
   5007 		error = interrupt_distribute(vih, affinity, NULL);
   5008 		if (error == 0) {
   5009 			aprint_normal_dev(sc->sc_dev,
   5010 			    "for TX and RX interrupting at %s affinity to %u\n",
   5011 			    intrstr, affinity_to);
   5012 		} else {
   5013 			aprint_normal_dev(sc->sc_dev,
   5014 			    "for TX and RX interrupting at %s\n", intrstr);
   5015 		}
   5016 		sc->sc_ihs[intr_idx] = vih;
   5017 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5018 			goto fail;
   5019 		txrx_established++;
   5020 		intr_idx++;
   5021 	}
   5022 
   5023 	/*
   5024 	 * LINK
   5025 	 */
   5026 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5027 	    sizeof(intrbuf));
   5028 #ifdef WM_MPSAFE
   5029 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5030 #endif
   5031 	memset(intr_xname, 0, sizeof(intr_xname));
   5032 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5033 	    device_xname(sc->sc_dev));
   5034 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5035 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5036 	if (vih == NULL) {
   5037 		aprint_error_dev(sc->sc_dev,
   5038 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5039 		    intrstr ? " at " : "",
   5040 		    intrstr ? intrstr : "");
   5041 
   5042 		goto fail;
   5043 	}
    5044 	/* Keep the default affinity for the LINK interrupt */
   5045 	aprint_normal_dev(sc->sc_dev,
   5046 	    "for LINK interrupting at %s\n", intrstr);
   5047 	sc->sc_ihs[intr_idx] = vih;
   5048 	sc->sc_link_intr_idx = intr_idx;
   5049 
   5050 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5051 	kcpuset_destroy(affinity);
   5052 	return 0;
   5053 
   5054  fail:
   5055 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5056 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5057 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5058 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5059 	}
   5060 
   5061 	kcpuset_destroy(affinity);
   5062 	return ENOMEM;
   5063 }
   5064 
   5065 static void
   5066 wm_turnon(struct wm_softc *sc)
   5067 {
   5068 	int i;
   5069 
   5070 	KASSERT(WM_CORE_LOCKED(sc));
   5071 
    5072 	/*
    5073 	 * Must unset the stopping flags in ascending order.
    5074 	 */
    5075 	for (i = 0; i < sc->sc_nqueues; i++) {
   5076 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5077 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5078 
   5079 		mutex_enter(txq->txq_lock);
   5080 		txq->txq_stopping = false;
   5081 		mutex_exit(txq->txq_lock);
   5082 
   5083 		mutex_enter(rxq->rxq_lock);
   5084 		rxq->rxq_stopping = false;
   5085 		mutex_exit(rxq->rxq_lock);
   5086 	}
   5087 
   5088 	sc->sc_core_stopping = false;
   5089 }
   5090 
   5091 static void
   5092 wm_turnoff(struct wm_softc *sc)
   5093 {
   5094 	int i;
   5095 
   5096 	KASSERT(WM_CORE_LOCKED(sc));
   5097 
   5098 	sc->sc_core_stopping = true;
   5099 
   5100 	/*
    5101 	 * Must set the stopping flags in ascending order.
   5102 	 */
    5103 	for (i = 0; i < sc->sc_nqueues; i++) {
   5104 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5105 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5106 
   5107 		mutex_enter(rxq->rxq_lock);
   5108 		rxq->rxq_stopping = true;
   5109 		mutex_exit(rxq->rxq_lock);
   5110 
   5111 		mutex_enter(txq->txq_lock);
   5112 		txq->txq_stopping = true;
   5113 		mutex_exit(txq->txq_lock);
   5114 	}
   5115 }
   5116 
   5117 /*
    5118  * Write the interrupt interval value to the ITR or EITR register.
   5119  */
   5120 static void
   5121 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5122 {
   5123 
   5124 	if (!wmq->wmq_set_itr)
   5125 		return;
   5126 
   5127 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5128 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5129 
   5130 		/*
    5131 		 * The 82575 doesn't have the CNT_INGR field,
    5132 		 * so overwrite the counter field in software.
   5133 		 */
   5134 		if (sc->sc_type == WM_T_82575)
   5135 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5136 		else
   5137 			eitr |= EITR_CNT_INGR;
   5138 
   5139 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5140 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5141 		/*
    5142 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5143 		 * the multiqueue function with MSI-X.
   5144 		 */
   5145 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5146 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5147 	} else {
   5148 		KASSERT(wmq->wmq_id == 0);
   5149 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5150 	}
   5151 
   5152 	wmq->wmq_set_itr = false;
   5153 }
   5154 
   5155 /*
   5156  * TODO
    5157  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5158  * however, it does not fit wm(4), so we keep AIM disabled until we
    5159  * find an appropriate ITR calculation.
   5160  */
   5161 /*
    5162  * Calculate the interrupt interval value for wm_itrs_writereg() to
    5163  * write. This function does not write the ITR/EITR register itself.
   5164  */
   5165 static void
   5166 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5167 {
   5168 #ifdef NOTYET
   5169 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5170 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5171 	uint32_t avg_size = 0;
   5172 	uint32_t new_itr;
   5173 
   5174 	if (rxq->rxq_packets)
   5175 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5176 	if (txq->txq_packets)
   5177 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5178 
   5179 	if (avg_size == 0) {
   5180 		new_itr = 450; /* restore default value */
   5181 		goto out;
   5182 	}
   5183 
   5184 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5185 	avg_size += 24;
   5186 
   5187 	/* Don't starve jumbo frames */
   5188 	avg_size = min(avg_size, 3000);
   5189 
   5190 	/* Give a little boost to mid-size frames */
   5191 	if ((avg_size > 300) && (avg_size < 1200))
   5192 		new_itr = avg_size / 3;
   5193 	else
   5194 		new_itr = avg_size / 2;
   5195 
   5196 out:
   5197 	/*
    5198 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
    5199 	 * controllers'. See the sc->sc_itr_init setting in wm_init_locked().
   5200 	 */
   5201 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5202 		new_itr *= 4;
   5203 
   5204 	if (new_itr != wmq->wmq_itr) {
   5205 		wmq->wmq_itr = new_itr;
   5206 		wmq->wmq_set_itr = true;
   5207 	} else
   5208 		wmq->wmq_set_itr = false;
   5209 
   5210 	rxq->rxq_packets = 0;
   5211 	rxq->rxq_bytes = 0;
   5212 	txq->txq_packets = 0;
   5213 	txq->txq_bytes = 0;
   5214 #endif
   5215 }
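
         /*
          * For reference, the disabled heuristic above on 1500-byte
          * average frames: avg_size = 1500 + 24 = 1524, which is outside
          * the 300..1200 "boost" window, so new_itr = 1524 / 2 = 762,
          * scaled by 4 to 3048 on everything except the 82575.
          */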
   5216 
   5217 /*
   5218  * wm_init:		[ifnet interface function]
   5219  *
   5220  *	Initialize the interface.
   5221  */
   5222 static int
   5223 wm_init(struct ifnet *ifp)
   5224 {
   5225 	struct wm_softc *sc = ifp->if_softc;
   5226 	int ret;
   5227 
   5228 	WM_CORE_LOCK(sc);
   5229 	ret = wm_init_locked(ifp);
   5230 	WM_CORE_UNLOCK(sc);
   5231 
   5232 	return ret;
   5233 }
   5234 
   5235 static int
   5236 wm_init_locked(struct ifnet *ifp)
   5237 {
   5238 	struct wm_softc *sc = ifp->if_softc;
   5239 	int i, j, trynum, error = 0;
   5240 	uint32_t reg;
   5241 
   5242 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5243 		device_xname(sc->sc_dev), __func__));
   5244 	KASSERT(WM_CORE_LOCKED(sc));
   5245 
   5246 	/*
    5247 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5248 	 * There is a small but measurable benefit to avoiding the adjustment
   5249 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5250 	 * on such platforms.  One possibility is that the DMA itself is
   5251 	 * slightly more efficient if the front of the entire packet (instead
   5252 	 * of the front of the headers) is aligned.
   5253 	 *
   5254 	 * Note we must always set align_tweak to 0 if we are using
   5255 	 * jumbo frames.
   5256 	 */
   5257 #ifdef __NO_STRICT_ALIGNMENT
   5258 	sc->sc_align_tweak = 0;
   5259 #else
   5260 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5261 		sc->sc_align_tweak = 0;
   5262 	else
   5263 		sc->sc_align_tweak = 2;
   5264 #endif /* __NO_STRICT_ALIGNMENT */
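
         	/*
         	 * Illustration of the tweak: with align_tweak == 2, the
         	 * 14-byte Ethernet header starts at offset 2 in the receive
         	 * buffer, so the IP header that follows lands on a 4-byte
         	 * boundary on strict-alignment platforms.
         	 */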
   5265 
   5266 	/* Cancel any pending I/O. */
   5267 	wm_stop_locked(ifp, 0);
   5268 
   5269 	/* update statistics before reset */
   5270 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5271 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5272 
   5273 	/* PCH_SPT hardware workaround */
   5274 	if (sc->sc_type == WM_T_PCH_SPT)
   5275 		wm_flush_desc_rings(sc);
   5276 
   5277 	/* Reset the chip to a known state. */
   5278 	wm_reset(sc);
   5279 
   5280 	/*
   5281 	 * AMT based hardware can now take control from firmware
   5282 	 * Do this after reset.
   5283 	 */
   5284 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5285 		wm_get_hw_control(sc);
   5286 
   5287 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5288 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5289 		wm_legacy_irq_quirk_spt(sc);
   5290 
   5291 	/* Init hardware bits */
   5292 	wm_initialize_hardware_bits(sc);
   5293 
   5294 	/* Reset the PHY. */
   5295 	if (sc->sc_flags & WM_F_HAS_MII)
   5296 		wm_gmii_reset(sc);
   5297 
   5298 	/* Calculate (E)ITR value */
   5299 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5300 		/*
   5301 		 * For NEWQUEUE's EITR (except for 82575).
    5302 		 * The 82575's EITR should be set to the same throttling
    5303 		 * value as old controllers' ITR because the interrupt/sec
    5304 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5305 		 * The 82574's EITR should be set to the same throttling
    5306 		 * value as its ITR.
    5307 		 *
    5308 		 * For N interrupts/sec, set this value to:
    5309 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5310 		 */
   5311 		sc->sc_itr_init = 450;
   5312 	} else if (sc->sc_type >= WM_T_82543) {
   5313 		/*
   5314 		 * Set up the interrupt throttling register (units of 256ns)
   5315 		 * Note that a footnote in Intel's documentation says this
   5316 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5317 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5318 		 * that that is also true for the 1024ns units of the other
   5319 		 * interrupt-related timer registers -- so, really, we ought
   5320 		 * to divide this value by 4 when the link speed is low.
   5321 		 *
   5322 		 * XXX implement this division at link speed change!
   5323 		 */
   5324 
   5325 		/*
   5326 		 * For N interrupts/sec, set this value to:
   5327 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5328 		 * absolute and packet timer values to this value
   5329 		 * divided by 4 to get "simple timer" behavior.
   5330 		 */
   5331 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5332 	}
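
         	/*
         	 * Arithmetic check of the comments above: 1,000,000,000 /
         	 * (1500 * 256) =~ 2604 ints/sec for the ITR case, and an EITR
         	 * value of 450 corresponds to 1,000,000 / 450 =~ 2222 ints/sec.
         	 */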
   5333 
   5334 	error = wm_init_txrx_queues(sc);
   5335 	if (error)
   5336 		goto out;
   5337 
   5338 	/*
   5339 	 * Clear out the VLAN table -- we don't use it (yet).
   5340 	 */
   5341 	CSR_WRITE(sc, WMREG_VET, 0);
   5342 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5343 		trynum = 10; /* Due to hw errata */
   5344 	else
   5345 		trynum = 1;
   5346 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5347 		for (j = 0; j < trynum; j++)
   5348 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5349 
   5350 	/*
   5351 	 * Set up flow-control parameters.
   5352 	 *
   5353 	 * XXX Values could probably stand some tuning.
   5354 	 */
   5355 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5356 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5357 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5358 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5359 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5360 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5361 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5362 	}
   5363 
   5364 	sc->sc_fcrtl = FCRTL_DFLT;
   5365 	if (sc->sc_type < WM_T_82543) {
   5366 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5367 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5368 	} else {
   5369 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5370 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5371 	}
   5372 
   5373 	if (sc->sc_type == WM_T_80003)
   5374 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5375 	else
   5376 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5377 
   5378 	/* Writes the control register. */
   5379 	wm_set_vlan(sc);
   5380 
   5381 	if (sc->sc_flags & WM_F_HAS_MII) {
   5382 		int val;
   5383 
   5384 		switch (sc->sc_type) {
   5385 		case WM_T_80003:
   5386 		case WM_T_ICH8:
   5387 		case WM_T_ICH9:
   5388 		case WM_T_ICH10:
   5389 		case WM_T_PCH:
   5390 		case WM_T_PCH2:
   5391 		case WM_T_PCH_LPT:
   5392 		case WM_T_PCH_SPT:
   5393 			/*
    5394 			 * Set the MAC to wait the maximum time between each
   5395 			 * iteration and increase the max iterations when
   5396 			 * polling the phy; this fixes erroneous timeouts at
   5397 			 * 10Mbps.
   5398 			 */
   5399 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5400 			    0xFFFF);
   5401 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5402 			val |= 0x3F;
   5403 			wm_kmrn_writereg(sc,
   5404 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5405 			break;
   5406 		default:
   5407 			break;
   5408 		}
   5409 
   5410 		if (sc->sc_type == WM_T_80003) {
   5411 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5412 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5413 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5414 
   5415 			/* Bypass RX and TX FIFO's */
   5416 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5417 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5418 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5419 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5420 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5421 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5422 		}
   5423 	}
   5424 #if 0
   5425 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5426 #endif
   5427 
   5428 	/* Set up checksum offload parameters. */
   5429 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5430 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5431 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5432 		reg |= RXCSUM_IPOFL;
   5433 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5434 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5435 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5436 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5437 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5438 
   5439 	/* Set registers about MSI-X */
   5440 	if (wm_is_using_msix(sc)) {
   5441 		uint32_t ivar;
   5442 		struct wm_queue *wmq;
   5443 		int qid, qintr_idx;
   5444 
   5445 		if (sc->sc_type == WM_T_82575) {
   5446 			/* Interrupt control */
   5447 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5448 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5449 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5450 
   5451 			/* TX and RX */
   5452 			for (i = 0; i < sc->sc_nqueues; i++) {
   5453 				wmq = &sc->sc_queue[i];
   5454 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5455 				    EITR_TX_QUEUE(wmq->wmq_id)
   5456 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5457 			}
   5458 			/* Link status */
   5459 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5460 			    EITR_OTHER);
   5461 		} else if (sc->sc_type == WM_T_82574) {
   5462 			/* Interrupt control */
   5463 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5464 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5465 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5466 
   5467 			/*
    5468 			 * Work around an issue with spurious interrupts
    5469 			 * in MSI-X mode.
    5470 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5471 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5472 			 */
   5473 			reg = CSR_READ(sc, WMREG_RFCTL);
   5474 			reg |= WMREG_RFCTL_ACKDIS;
   5475 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5476 
   5477 			ivar = 0;
   5478 			/* TX and RX */
   5479 			for (i = 0; i < sc->sc_nqueues; i++) {
   5480 				wmq = &sc->sc_queue[i];
   5481 				qid = wmq->wmq_id;
   5482 				qintr_idx = wmq->wmq_intr_idx;
   5483 
   5484 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5485 				    IVAR_TX_MASK_Q_82574(qid));
   5486 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5487 				    IVAR_RX_MASK_Q_82574(qid));
   5488 			}
   5489 			/* Link status */
   5490 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5491 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5492 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5493 		} else {
   5494 			/* Interrupt control */
   5495 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5496 			    | GPIE_EIAME | GPIE_PBA);
   5497 
   5498 			switch (sc->sc_type) {
   5499 			case WM_T_82580:
   5500 			case WM_T_I350:
   5501 			case WM_T_I354:
   5502 			case WM_T_I210:
   5503 			case WM_T_I211:
   5504 				/* TX and RX */
   5505 				for (i = 0; i < sc->sc_nqueues; i++) {
   5506 					wmq = &sc->sc_queue[i];
   5507 					qid = wmq->wmq_id;
   5508 					qintr_idx = wmq->wmq_intr_idx;
   5509 
   5510 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5511 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5512 					ivar |= __SHIFTIN((qintr_idx
   5513 						| IVAR_VALID),
   5514 					    IVAR_TX_MASK_Q(qid));
   5515 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5516 					ivar |= __SHIFTIN((qintr_idx
   5517 						| IVAR_VALID),
   5518 					    IVAR_RX_MASK_Q(qid));
   5519 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5520 				}
   5521 				break;
   5522 			case WM_T_82576:
   5523 				/* TX and RX */
   5524 				for (i = 0; i < sc->sc_nqueues; i++) {
   5525 					wmq = &sc->sc_queue[i];
   5526 					qid = wmq->wmq_id;
   5527 					qintr_idx = wmq->wmq_intr_idx;
   5528 
   5529 					ivar = CSR_READ(sc,
   5530 					    WMREG_IVAR_Q_82576(qid));
   5531 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5532 					ivar |= __SHIFTIN((qintr_idx
   5533 						| IVAR_VALID),
   5534 					    IVAR_TX_MASK_Q_82576(qid));
   5535 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5536 					ivar |= __SHIFTIN((qintr_idx
   5537 						| IVAR_VALID),
   5538 					    IVAR_RX_MASK_Q_82576(qid));
   5539 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5540 					    ivar);
   5541 				}
   5542 				break;
   5543 			default:
   5544 				break;
   5545 			}
   5546 
   5547 			/* Link status */
   5548 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5549 			    IVAR_MISC_OTHER);
   5550 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5551 		}
   5552 
   5553 		if (wm_is_using_multiqueue(sc)) {
   5554 			wm_init_rss(sc);
   5555 
   5556 			/*
    5557 			 * NOTE: Receive Full-Packet Checksum Offload
    5558 			 * is mutually exclusive with Multiqueue. However,
    5559 			 * this is not the same as the TCP/IP checksums,
    5560 			 * which still work.
    5561 			 */
   5562 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5563 			reg |= RXCSUM_PCSD;
   5564 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5565 		}
   5566 	}
   5567 
   5568 	/* Set up the interrupt registers. */
   5569 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5570 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5571 	    ICR_RXO | ICR_RXT0;
   5572 	if (wm_is_using_msix(sc)) {
   5573 		uint32_t mask;
   5574 		struct wm_queue *wmq;
   5575 
   5576 		switch (sc->sc_type) {
   5577 		case WM_T_82574:
   5578 			mask = 0;
   5579 			for (i = 0; i < sc->sc_nqueues; i++) {
   5580 				wmq = &sc->sc_queue[i];
   5581 				mask |= ICR_TXQ(wmq->wmq_id);
   5582 				mask |= ICR_RXQ(wmq->wmq_id);
   5583 			}
   5584 			mask |= ICR_OTHER;
   5585 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5586 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5587 			break;
   5588 		default:
   5589 			if (sc->sc_type == WM_T_82575) {
   5590 				mask = 0;
   5591 				for (i = 0; i < sc->sc_nqueues; i++) {
   5592 					wmq = &sc->sc_queue[i];
   5593 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5594 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5595 				}
   5596 				mask |= EITR_OTHER;
   5597 			} else {
   5598 				mask = 0;
   5599 				for (i = 0; i < sc->sc_nqueues; i++) {
   5600 					wmq = &sc->sc_queue[i];
   5601 					mask |= 1 << wmq->wmq_intr_idx;
   5602 				}
   5603 				mask |= 1 << sc->sc_link_intr_idx;
   5604 			}
   5605 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5606 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5607 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5608 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5609 			break;
   5610 		}
   5611 	} else
   5612 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5613 
   5614 	/* Set up the inter-packet gap. */
   5615 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5616 
   5617 	if (sc->sc_type >= WM_T_82543) {
   5618 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5619 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5620 			wm_itrs_writereg(sc, wmq);
   5621 		}
   5622 		/*
    5623 		 * Link interrupts occur much less frequently than TX
    5624 		 * and RX interrupts, so we don't tune the
    5625 		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's
    5626 		 * if_igb does.
   5627 		 */
   5628 	}
   5629 
   5630 	/* Set the VLAN ethernetype. */
   5631 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5632 
   5633 	/*
   5634 	 * Set up the transmit control register; we start out with
    5635 	 * a collision distance suitable for FDX, but update it when
   5636 	 * we resolve the media type.
   5637 	 */
   5638 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5639 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5640 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5641 	if (sc->sc_type >= WM_T_82571)
   5642 		sc->sc_tctl |= TCTL_MULR;
   5643 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5644 
   5645 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5646 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5647 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5648 	}
   5649 
   5650 	if (sc->sc_type == WM_T_80003) {
   5651 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5652 		reg &= ~TCTL_EXT_GCEX_MASK;
   5653 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5654 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5655 	}
   5656 
   5657 	/* Set the media. */
   5658 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5659 		goto out;
   5660 
   5661 	/* Configure for OS presence */
   5662 	wm_init_manageability(sc);
   5663 
   5664 	/*
   5665 	 * Set up the receive control register; we actually program
   5666 	 * the register when we set the receive filter.  Use multicast
   5667 	 * address offset type 0.
   5668 	 *
   5669 	 * Only the i82544 has the ability to strip the incoming
   5670 	 * CRC, so we don't enable that feature.
   5671 	 */
   5672 	sc->sc_mchash_type = 0;
   5673 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5674 	    | RCTL_MO(sc->sc_mchash_type);
   5675 
   5676 	/*
    5677 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5678 	 */
   5679 	if (sc->sc_type == WM_T_82574)
   5680 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5681 
   5682 	/*
   5683 	 * The I350 has a bug where it always strips the CRC whether
    5684 	 * asked to or not, so ask for the stripped CRC here and cope in rxeof.
   5685 	 */
   5686 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5687 	    || (sc->sc_type == WM_T_I210))
   5688 		sc->sc_rctl |= RCTL_SECRC;
   5689 
   5690 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5691 	    && (ifp->if_mtu > ETHERMTU)) {
   5692 		sc->sc_rctl |= RCTL_LPE;
   5693 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5694 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5695 	}
   5696 
   5697 	if (MCLBYTES == 2048) {
   5698 		sc->sc_rctl |= RCTL_2k;
   5699 	} else {
   5700 		if (sc->sc_type >= WM_T_82543) {
   5701 			switch (MCLBYTES) {
   5702 			case 4096:
   5703 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5704 				break;
   5705 			case 8192:
   5706 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5707 				break;
   5708 			case 16384:
   5709 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5710 				break;
   5711 			default:
   5712 				panic("wm_init: MCLBYTES %d unsupported",
   5713 				    MCLBYTES);
   5714 				break;
   5715 			}
   5716 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5717 	}
   5718 
   5719 	/* Enable ECC */
   5720 	switch (sc->sc_type) {
   5721 	case WM_T_82571:
   5722 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5723 		reg |= PBA_ECC_CORR_EN;
   5724 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5725 		break;
   5726 	case WM_T_PCH_LPT:
   5727 	case WM_T_PCH_SPT:
   5728 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5729 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5730 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5731 
   5732 		sc->sc_ctrl |= CTRL_MEHE;
   5733 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5734 		break;
   5735 	default:
   5736 		break;
   5737 	}
   5738 
   5739 	/* On 575 and later set RDT only if RX enabled */
   5740 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5741 		int qidx;
   5742 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5743 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5744 			for (i = 0; i < WM_NRXDESC; i++) {
   5745 				mutex_enter(rxq->rxq_lock);
   5746 				wm_init_rxdesc(rxq, i);
   5747 				mutex_exit(rxq->rxq_lock);
   5748 
   5749 			}
   5750 		}
   5751 	}
   5752 
   5753 	/* Set the receive filter. */
   5754 	wm_set_filter(sc);
   5755 
   5756 	wm_turnon(sc);
   5757 
   5758 	/* Start the one second link check clock. */
   5759 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5760 
   5761 	/* ...all done! */
   5762 	ifp->if_flags |= IFF_RUNNING;
   5763 	ifp->if_flags &= ~IFF_OACTIVE;
   5764 
   5765  out:
   5766 	sc->sc_if_flags = ifp->if_flags;
   5767 	if (error)
   5768 		log(LOG_ERR, "%s: interface not running\n",
   5769 		    device_xname(sc->sc_dev));
   5770 	return error;
   5771 }
   5772 
   5773 /*
   5774  * wm_stop:		[ifnet interface function]
   5775  *
   5776  *	Stop transmission on the interface.
   5777  */
   5778 static void
   5779 wm_stop(struct ifnet *ifp, int disable)
   5780 {
   5781 	struct wm_softc *sc = ifp->if_softc;
   5782 
   5783 	WM_CORE_LOCK(sc);
   5784 	wm_stop_locked(ifp, disable);
   5785 	WM_CORE_UNLOCK(sc);
   5786 }
   5787 
   5788 static void
   5789 wm_stop_locked(struct ifnet *ifp, int disable)
   5790 {
   5791 	struct wm_softc *sc = ifp->if_softc;
   5792 	struct wm_txsoft *txs;
   5793 	int i, qidx;
   5794 
   5795 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5796 		device_xname(sc->sc_dev), __func__));
   5797 	KASSERT(WM_CORE_LOCKED(sc));
   5798 
   5799 	wm_turnoff(sc);
   5800 
   5801 	/* Stop the one second clock. */
   5802 	callout_stop(&sc->sc_tick_ch);
   5803 
   5804 	/* Stop the 82547 Tx FIFO stall check timer. */
   5805 	if (sc->sc_type == WM_T_82547)
   5806 		callout_stop(&sc->sc_txfifo_ch);
   5807 
   5808 	if (sc->sc_flags & WM_F_HAS_MII) {
   5809 		/* Down the MII. */
   5810 		mii_down(&sc->sc_mii);
   5811 	} else {
   5812 #if 0
   5813 		/* Should we clear PHY's status properly? */
   5814 		wm_reset(sc);
   5815 #endif
   5816 	}
   5817 
   5818 	/* Stop the transmit and receive processes. */
   5819 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5820 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5821 	sc->sc_rctl &= ~RCTL_EN;
   5822 
   5823 	/*
   5824 	 * Clear the interrupt mask to ensure the device cannot assert its
   5825 	 * interrupt line.
   5826 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5827 	 * service any currently pending or shared interrupt.
   5828 	 */
   5829 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5830 	sc->sc_icr = 0;
   5831 	if (wm_is_using_msix(sc)) {
   5832 		if (sc->sc_type != WM_T_82574) {
   5833 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5834 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5835 		} else
   5836 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5837 	}
   5838 
   5839 	/* Release any queued transmit buffers. */
   5840 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5841 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5842 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5843 		mutex_enter(txq->txq_lock);
   5844 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5845 			txs = &txq->txq_soft[i];
   5846 			if (txs->txs_mbuf != NULL) {
   5847 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5848 				m_freem(txs->txs_mbuf);
   5849 				txs->txs_mbuf = NULL;
   5850 			}
   5851 		}
   5852 		mutex_exit(txq->txq_lock);
   5853 	}
   5854 
   5855 	/* Mark the interface as down and cancel the watchdog timer. */
   5856 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5857 	ifp->if_timer = 0;
   5858 
   5859 	if (disable) {
   5860 		for (i = 0; i < sc->sc_nqueues; i++) {
   5861 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5862 			mutex_enter(rxq->rxq_lock);
   5863 			wm_rxdrain(rxq);
   5864 			mutex_exit(rxq->rxq_lock);
   5865 		}
   5866 	}
   5867 
   5868 #if 0 /* notyet */
   5869 	if (sc->sc_type >= WM_T_82544)
   5870 		CSR_WRITE(sc, WMREG_WUC, 0);
   5871 #endif
   5872 }
   5873 
   5874 static void
   5875 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5876 {
   5877 	struct mbuf *m;
   5878 	int i;
   5879 
   5880 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5881 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5882 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5883 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5884 		    m->m_data, m->m_len, m->m_flags);
   5885 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5886 	    i, i == 1 ? "" : "s");
   5887 }
   5888 
   5889 /*
   5890  * wm_82547_txfifo_stall:
   5891  *
   5892  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5893  *	reset the FIFO pointers, and restart packet transmission.
   5894  */
   5895 static void
   5896 wm_82547_txfifo_stall(void *arg)
   5897 {
   5898 	struct wm_softc *sc = arg;
   5899 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5900 
   5901 	mutex_enter(txq->txq_lock);
   5902 
   5903 	if (txq->txq_stopping)
   5904 		goto out;
   5905 
   5906 	if (txq->txq_fifo_stall) {
   5907 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5908 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5909 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5910 			/*
   5911 			 * Packets have drained.  Stop transmitter, reset
   5912 			 * FIFO pointers, restart transmitter, and kick
   5913 			 * the packet queue.
   5914 			 */
   5915 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5916 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5917 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5918 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5919 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5920 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5921 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5922 			CSR_WRITE_FLUSH(sc);
   5923 
   5924 			txq->txq_fifo_head = 0;
   5925 			txq->txq_fifo_stall = 0;
   5926 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5927 		} else {
   5928 			/*
   5929 			 * Still waiting for packets to drain; try again in
   5930 			 * another tick.
   5931 			 */
   5932 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5933 		}
   5934 	}
   5935 
   5936 out:
   5937 	mutex_exit(txq->txq_lock);
   5938 }
   5939 
   5940 /*
   5941  * wm_82547_txfifo_bugchk:
   5942  *
   5943  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5944  *	prevent enqueueing a packet that would wrap around the end
    5945  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5946  *
   5947  *	We do this by checking the amount of space before the end
   5948  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5949  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5950  *	the internal FIFO pointers to the beginning, and restart
   5951  *	transmission on the interface.
   5952  */
   5953 #define	WM_FIFO_HDR		0x10
   5954 #define	WM_82547_PAD_LEN	0x3e0
   5955 static int
   5956 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5957 {
   5958 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5959 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5960 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5961 
   5962 	/* Just return if already stalled. */
   5963 	if (txq->txq_fifo_stall)
   5964 		return 1;
   5965 
   5966 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5967 		/* Stall only occurs in half-duplex mode. */
   5968 		goto send_packet;
   5969 	}
   5970 
   5971 	if (len >= WM_82547_PAD_LEN + space) {
   5972 		txq->txq_fifo_stall = 1;
   5973 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5974 		return 1;
   5975 	}
   5976 
   5977  send_packet:
   5978 	txq->txq_fifo_head += len;
   5979 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5980 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5981 
   5982 	return 0;
   5983 }
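
         /*
          * Worked example of the check above (illustrative values only):
          * with a 0x1000-byte FIFO whose head is at 0xd00, space = 0x300.
          * A 1500-byte half-duplex packet rounds up to
          * len = roundup(1500 + 0x10, 0x10) = 0x5f0; 0x5f0 >= 0x3e0 + 0x300
          * is false, so the packet is sent and the head wraps to
          * 0xd00 + 0x5f0 - 0x1000 = 0x2f0.
          */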
   5984 
   5985 static int
   5986 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5987 {
   5988 	int error;
   5989 
   5990 	/*
   5991 	 * Allocate the control data structures, and create and load the
   5992 	 * DMA map for it.
   5993 	 *
   5994 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5995 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5996 	 * both sets within the same 4G segment.
   5997 	 */
   5998 	if (sc->sc_type < WM_T_82544)
   5999 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6000 	else
   6001 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6002 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6003 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6004 	else
   6005 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6006 
   6007 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6008 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6009 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6010 		aprint_error_dev(sc->sc_dev,
   6011 		    "unable to allocate TX control data, error = %d\n",
   6012 		    error);
   6013 		goto fail_0;
   6014 	}
   6015 
   6016 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6017 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6018 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6019 		aprint_error_dev(sc->sc_dev,
   6020 		    "unable to map TX control data, error = %d\n", error);
   6021 		goto fail_1;
   6022 	}
   6023 
   6024 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6025 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6026 		aprint_error_dev(sc->sc_dev,
   6027 		    "unable to create TX control data DMA map, error = %d\n",
   6028 		    error);
   6029 		goto fail_2;
   6030 	}
   6031 
   6032 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6033 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6034 		aprint_error_dev(sc->sc_dev,
   6035 		    "unable to load TX control data DMA map, error = %d\n",
   6036 		    error);
   6037 		goto fail_3;
   6038 	}
   6039 
   6040 	return 0;
   6041 
   6042  fail_3:
   6043 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6044  fail_2:
   6045 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6046 	    WM_TXDESCS_SIZE(txq));
   6047  fail_1:
   6048 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6049  fail_0:
   6050 	return error;
   6051 }
   6052 
   6053 static void
   6054 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6055 {
   6056 
   6057 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6058 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6059 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6060 	    WM_TXDESCS_SIZE(txq));
   6061 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6062 }
   6063 
   6064 static int
   6065 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6066 {
   6067 	int error;
   6068 	size_t rxq_descs_size;
   6069 
   6070 	/*
   6071 	 * Allocate the control data structures, and create and load the
   6072 	 * DMA map for it.
   6073 	 *
   6074 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6075 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6076 	 * both sets within the same 4G segment.
   6077 	 */
   6078 	rxq->rxq_ndesc = WM_NRXDESC;
   6079 	if (sc->sc_type == WM_T_82574)
   6080 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6081 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6082 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6083 	else
   6084 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6085 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6086 
   6087 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6088 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6089 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6090 		aprint_error_dev(sc->sc_dev,
   6091 		    "unable to allocate RX control data, error = %d\n",
   6092 		    error);
   6093 		goto fail_0;
   6094 	}
   6095 
   6096 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6097 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6098 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6099 		aprint_error_dev(sc->sc_dev,
   6100 		    "unable to map RX control data, error = %d\n", error);
   6101 		goto fail_1;
   6102 	}
   6103 
   6104 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6105 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6106 		aprint_error_dev(sc->sc_dev,
   6107 		    "unable to create RX control data DMA map, error = %d\n",
   6108 		    error);
   6109 		goto fail_2;
   6110 	}
   6111 
   6112 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6113 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6114 		aprint_error_dev(sc->sc_dev,
   6115 		    "unable to load RX control data DMA map, error = %d\n",
   6116 		    error);
   6117 		goto fail_3;
   6118 	}
   6119 
   6120 	return 0;
   6121 
   6122  fail_3:
   6123 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6124  fail_2:
   6125 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6126 	    rxq_descs_size);
   6127  fail_1:
   6128 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6129  fail_0:
   6130 	return error;
   6131 }
   6132 
   6133 static void
   6134 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6135 {
   6136 
   6137 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6138 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6139 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6140 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6141 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6142 }
   6143 
   6144 
   6145 static int
   6146 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6147 {
   6148 	int i, error;
   6149 
   6150 	/* Create the transmit buffer DMA maps. */
   6151 	WM_TXQUEUELEN(txq) =
   6152 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6153 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6154 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6155 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6156 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6157 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6158 			aprint_error_dev(sc->sc_dev,
   6159 			    "unable to create Tx DMA map %d, error = %d\n",
   6160 			    i, error);
   6161 			goto fail;
   6162 		}
   6163 	}
   6164 
   6165 	return 0;
   6166 
   6167  fail:
   6168 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6169 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6170 			bus_dmamap_destroy(sc->sc_dmat,
   6171 			    txq->txq_soft[i].txs_dmamap);
   6172 	}
   6173 	return error;
   6174 }
   6175 
   6176 static void
   6177 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6178 {
   6179 	int i;
   6180 
   6181 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6182 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6183 			bus_dmamap_destroy(sc->sc_dmat,
   6184 			    txq->txq_soft[i].txs_dmamap);
   6185 	}
   6186 }
   6187 
   6188 static int
   6189 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6190 {
   6191 	int i, error;
   6192 
   6193 	/* Create the receive buffer DMA maps. */
   6194 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6195 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6196 			    MCLBYTES, 0, 0,
   6197 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6198 			aprint_error_dev(sc->sc_dev,
   6199 			    "unable to create Rx DMA map %d error = %d\n",
   6200 			    i, error);
   6201 			goto fail;
   6202 		}
   6203 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6204 	}
   6205 
   6206 	return 0;
   6207 
   6208  fail:
   6209 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6210 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6211 			bus_dmamap_destroy(sc->sc_dmat,
   6212 			    rxq->rxq_soft[i].rxs_dmamap);
   6213 	}
   6214 	return error;
   6215 }
   6216 
   6217 static void
   6218 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6219 {
   6220 	int i;
   6221 
   6222 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6223 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6224 			bus_dmamap_destroy(sc->sc_dmat,
   6225 			    rxq->rxq_soft[i].rxs_dmamap);
   6226 	}
   6227 }
   6228 
   6229 /*
    6230  * wm_alloc_txrx_queues:
    6231  *	Allocate {tx,rx} descs and {tx,rx} buffers.
   6232  */
   6233 static int
   6234 wm_alloc_txrx_queues(struct wm_softc *sc)
   6235 {
   6236 	int i, error, tx_done, rx_done;
   6237 
   6238 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6239 	    KM_SLEEP);
   6240 	if (sc->sc_queue == NULL) {
   6241 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6242 		error = ENOMEM;
   6243 		goto fail_0;
   6244 	}
   6245 
   6246 	/*
   6247 	 * For transmission
   6248 	 */
   6249 	error = 0;
   6250 	tx_done = 0;
   6251 	for (i = 0; i < sc->sc_nqueues; i++) {
   6252 #ifdef WM_EVENT_COUNTERS
   6253 		int j;
   6254 		const char *xname;
   6255 #endif
   6256 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6257 		txq->txq_sc = sc;
   6258 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6259 
   6260 		error = wm_alloc_tx_descs(sc, txq);
   6261 		if (error)
   6262 			break;
   6263 		error = wm_alloc_tx_buffer(sc, txq);
   6264 		if (error) {
   6265 			wm_free_tx_descs(sc, txq);
   6266 			break;
   6267 		}
   6268 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6269 		if (txq->txq_interq == NULL) {
   6270 			wm_free_tx_descs(sc, txq);
   6271 			wm_free_tx_buffer(sc, txq);
   6272 			error = ENOMEM;
   6273 			break;
   6274 		}
   6275 
   6276 #ifdef WM_EVENT_COUNTERS
   6277 		xname = device_xname(sc->sc_dev);
   6278 
   6279 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6280 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6281 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6282 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6283 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6284 
   6285 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6286 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6287 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6288 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6289 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6290 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6291 
   6292 		for (j = 0; j < WM_NTXSEGS; j++) {
   6293 			snprintf(txq->txq_txseg_evcnt_names[j],
   6294 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6295 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6296 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6297 		}
   6298 
   6299 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6300 
   6301 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6302 #endif /* WM_EVENT_COUNTERS */
   6303 
   6304 		tx_done++;
   6305 	}
   6306 	if (error)
   6307 		goto fail_1;
   6308 
   6309 	/*
    6310 	 * For receive
   6311 	 */
   6312 	error = 0;
   6313 	rx_done = 0;
   6314 	for (i = 0; i < sc->sc_nqueues; i++) {
   6315 #ifdef WM_EVENT_COUNTERS
   6316 		const char *xname;
   6317 #endif
   6318 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6319 		rxq->rxq_sc = sc;
   6320 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6321 
   6322 		error = wm_alloc_rx_descs(sc, rxq);
   6323 		if (error)
   6324 			break;
   6325 
   6326 		error = wm_alloc_rx_buffer(sc, rxq);
   6327 		if (error) {
   6328 			wm_free_rx_descs(sc, rxq);
   6329 			break;
   6330 		}
   6331 
   6332 #ifdef WM_EVENT_COUNTERS
   6333 		xname = device_xname(sc->sc_dev);
   6334 
   6335 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6336 
   6337 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6338 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6339 #endif /* WM_EVENT_COUNTERS */
   6340 
   6341 		rx_done++;
   6342 	}
   6343 	if (error)
   6344 		goto fail_2;
   6345 
   6346 	return 0;
   6347 
   6348  fail_2:
   6349 	for (i = 0; i < rx_done; i++) {
   6350 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6351 		wm_free_rx_buffer(sc, rxq);
   6352 		wm_free_rx_descs(sc, rxq);
   6353 		if (rxq->rxq_lock)
   6354 			mutex_obj_free(rxq->rxq_lock);
   6355 	}
   6356  fail_1:
   6357 	for (i = 0; i < tx_done; i++) {
   6358 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6359 		pcq_destroy(txq->txq_interq);
   6360 		wm_free_tx_buffer(sc, txq);
   6361 		wm_free_tx_descs(sc, txq);
   6362 		if (txq->txq_lock)
   6363 			mutex_obj_free(txq->txq_lock);
   6364 	}
   6365 
   6366 	kmem_free(sc->sc_queue,
   6367 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6368  fail_0:
   6369 	return error;
   6370 }
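
/*
 * Note on the error handling above (illustrative): tx_done and rx_done
 * count the queues that were completely set up, so the fail_1 and
 * fail_2 unwind loops free exactly the resources of the completed
 * iterations and nothing more.
 */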
   6371 
   6372 /*
    6373  * wm_free_txrx_queues:
    6374  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6375  */
   6376 static void
   6377 wm_free_txrx_queues(struct wm_softc *sc)
   6378 {
   6379 	int i;
   6380 
   6381 	for (i = 0; i < sc->sc_nqueues; i++) {
   6382 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6383 
   6384 #ifdef WM_EVENT_COUNTERS
   6385 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6386 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6387 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6388 #endif /* WM_EVENT_COUNTERS */
   6389 
   6390 		wm_free_rx_buffer(sc, rxq);
   6391 		wm_free_rx_descs(sc, rxq);
   6392 		if (rxq->rxq_lock)
   6393 			mutex_obj_free(rxq->rxq_lock);
   6394 	}
   6395 
   6396 	for (i = 0; i < sc->sc_nqueues; i++) {
   6397 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6398 		struct mbuf *m;
   6399 #ifdef WM_EVENT_COUNTERS
   6400 		int j;
   6401 
   6402 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6403 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6404 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6405 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6406 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6407 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6408 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6409 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6410 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6411 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6412 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6413 
   6414 		for (j = 0; j < WM_NTXSEGS; j++)
   6415 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6416 
   6417 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6418 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6419 #endif /* WM_EVENT_COUNTERS */
   6420 
   6421 		/* drain txq_interq */
   6422 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6423 			m_freem(m);
   6424 		pcq_destroy(txq->txq_interq);
   6425 
   6426 		wm_free_tx_buffer(sc, txq);
   6427 		wm_free_tx_descs(sc, txq);
   6428 		if (txq->txq_lock)
   6429 			mutex_obj_free(txq->txq_lock);
   6430 	}
   6431 
   6432 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6433 }
   6434 
   6435 static void
   6436 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6437 {
   6438 
   6439 	KASSERT(mutex_owned(txq->txq_lock));
   6440 
   6441 	/* Initialize the transmit descriptor ring. */
   6442 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6443 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6444 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6445 	txq->txq_free = WM_NTXDESC(txq);
   6446 	txq->txq_next = 0;
   6447 }
   6448 
   6449 static void
   6450 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6451     struct wm_txqueue *txq)
   6452 {
   6453 
   6454 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6455 		device_xname(sc->sc_dev), __func__));
   6456 	KASSERT(mutex_owned(txq->txq_lock));
   6457 
   6458 	if (sc->sc_type < WM_T_82543) {
   6459 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6460 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6461 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6462 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6463 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6464 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6465 	} else {
   6466 		int qid = wmq->wmq_id;
   6467 
   6468 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6469 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6470 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6471 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6472 
   6473 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6474 			/*
   6475 			 * Don't write TDT before TCTL.EN is set.
    6476 			 * See the documentation.
   6477 			 */
   6478 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6479 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6480 			    | TXDCTL_WTHRESH(0));
   6481 		else {
   6482 			/* XXX should update with AIM? */
   6483 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6484 			if (sc->sc_type >= WM_T_82540) {
    6485 				/* Should be the same as TIDV. */
   6486 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6487 			}
   6488 
   6489 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6490 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6491 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6492 		}
   6493 	}
   6494 }
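
/*
 * Illustration (not driver code): how the Tx ring registers fit
 * together.  With, e.g., 256 legacy descriptors of 16 bytes each:
 *
 *	TDBAH:TDBAL = physical base address of the ring
 *	TDLEN       = 256 * 16 = 4096 bytes
 *	TDH == TDT == 0, i.e. the ring starts out empty
 *
 * The hardware fetches descriptors from TDH up to (but not including)
 * TDT, so the driver hands packets to the chip by advancing TDT past
 * the freshly filled descriptors.
 */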
   6495 
   6496 static void
   6497 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6498 {
   6499 	int i;
   6500 
   6501 	KASSERT(mutex_owned(txq->txq_lock));
   6502 
   6503 	/* Initialize the transmit job descriptors. */
   6504 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6505 		txq->txq_soft[i].txs_mbuf = NULL;
   6506 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6507 	txq->txq_snext = 0;
   6508 	txq->txq_sdirty = 0;
   6509 }
   6510 
   6511 static void
   6512 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6513     struct wm_txqueue *txq)
   6514 {
   6515 
   6516 	KASSERT(mutex_owned(txq->txq_lock));
   6517 
   6518 	/*
   6519 	 * Set up some register offsets that are different between
   6520 	 * the i82542 and the i82543 and later chips.
   6521 	 */
   6522 	if (sc->sc_type < WM_T_82543)
   6523 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6524 	else
   6525 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6526 
   6527 	wm_init_tx_descs(sc, txq);
   6528 	wm_init_tx_regs(sc, wmq, txq);
   6529 	wm_init_tx_buffer(sc, txq);
   6530 }
   6531 
   6532 static void
   6533 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6534     struct wm_rxqueue *rxq)
   6535 {
   6536 
   6537 	KASSERT(mutex_owned(rxq->rxq_lock));
   6538 
   6539 	/*
   6540 	 * Initialize the receive descriptor and receive job
   6541 	 * descriptor rings.
   6542 	 */
   6543 	if (sc->sc_type < WM_T_82543) {
   6544 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6545 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6546 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6547 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6548 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6549 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6550 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6551 
   6552 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6553 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6554 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6555 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6556 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6557 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6558 	} else {
   6559 		int qid = wmq->wmq_id;
   6560 
   6561 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6562 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6563 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6564 
   6565 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6566 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6567 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6568 
   6569 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6570 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6571 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6572 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6573 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6574 			    | RXDCTL_WTHRESH(1));
   6575 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6576 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6577 		} else {
   6578 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6579 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6580 			/* XXX should update with AIM? */
   6581 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6582 			/* MUST be the same as RDTR. */
   6583 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6584 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6585 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6586 		}
   6587 	}
   6588 }
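
/*
 * Worked example (illustrative): SRRCTL expresses the packet buffer
 * size in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, which is why
 * the code above insists that MCLBYTES be a multiple of that unit.
 * On a port where MCLBYTES is 2048 and the shift is 10, the value
 * programmed is 2048 >> 10 == 2, i.e. two 1 KB units per receive
 * buffer.
 */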
   6589 
   6590 static int
   6591 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6592 {
   6593 	struct wm_rxsoft *rxs;
   6594 	int error, i;
   6595 
   6596 	KASSERT(mutex_owned(rxq->rxq_lock));
   6597 
   6598 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6599 		rxs = &rxq->rxq_soft[i];
   6600 		if (rxs->rxs_mbuf == NULL) {
   6601 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6602 				log(LOG_ERR, "%s: unable to allocate or map "
   6603 				    "rx buffer %d, error = %d\n",
   6604 				    device_xname(sc->sc_dev), i, error);
   6605 				/*
   6606 				 * XXX Should attempt to run with fewer receive
   6607 				 * XXX buffers instead of just failing.
   6608 				 */
   6609 				wm_rxdrain(rxq);
   6610 				return ENOMEM;
   6611 			}
   6612 		} else {
   6613 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6614 				wm_init_rxdesc(rxq, i);
   6615 			/*
    6616 			 * For 82575 and newer devices, the RX descriptors
    6617 			 * must be initialized after RCTL.EN is set in
    6618 			 * wm_set_filter().
   6619 			 */
   6620 		}
   6621 	}
   6622 	rxq->rxq_ptr = 0;
   6623 	rxq->rxq_discard = 0;
   6624 	WM_RXCHAIN_RESET(rxq);
   6625 
   6626 	return 0;
   6627 }
   6628 
   6629 static int
   6630 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6631     struct wm_rxqueue *rxq)
   6632 {
   6633 
   6634 	KASSERT(mutex_owned(rxq->rxq_lock));
   6635 
   6636 	/*
   6637 	 * Set up some register offsets that are different between
   6638 	 * the i82542 and the i82543 and later chips.
   6639 	 */
   6640 	if (sc->sc_type < WM_T_82543)
   6641 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6642 	else
   6643 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6644 
   6645 	wm_init_rx_regs(sc, wmq, rxq);
   6646 	return wm_init_rx_buffer(sc, rxq);
   6647 }
   6648 
   6649 /*
    6650  * wm_init_txrx_queues:
    6651  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6652  */
   6653 static int
   6654 wm_init_txrx_queues(struct wm_softc *sc)
   6655 {
   6656 	int i, error = 0;
   6657 
   6658 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6659 		device_xname(sc->sc_dev), __func__));
   6660 
   6661 	for (i = 0; i < sc->sc_nqueues; i++) {
   6662 		struct wm_queue *wmq = &sc->sc_queue[i];
   6663 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6664 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6665 
   6666 		/*
   6667 		 * TODO
    6668 		 * Currently, we use a constant value instead of AIM.
    6669 		 * Furthermore, the interrupt interval of a multiqueue
    6670 		 * device using polling mode is smaller than the default.
    6671 		 * More tuning and AIM support are required.
   6672 		 */
   6673 		if (wm_is_using_multiqueue(sc))
   6674 			wmq->wmq_itr = 50;
   6675 		else
   6676 			wmq->wmq_itr = sc->sc_itr_init;
   6677 		wmq->wmq_set_itr = true;
   6678 
   6679 		mutex_enter(txq->txq_lock);
   6680 		wm_init_tx_queue(sc, wmq, txq);
   6681 		mutex_exit(txq->txq_lock);
   6682 
   6683 		mutex_enter(rxq->rxq_lock);
   6684 		error = wm_init_rx_queue(sc, wmq, rxq);
   6685 		mutex_exit(rxq->rxq_lock);
   6686 		if (error)
   6687 			break;
   6688 	}
   6689 
   6690 	return error;
   6691 }
   6692 
   6693 /*
   6694  * wm_tx_offload:
   6695  *
   6696  *	Set up TCP/IP checksumming parameters for the
   6697  *	specified packet.
   6698  */
   6699 static int
   6700 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6701     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6702 {
   6703 	struct mbuf *m0 = txs->txs_mbuf;
   6704 	struct livengood_tcpip_ctxdesc *t;
   6705 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6706 	uint32_t ipcse;
   6707 	struct ether_header *eh;
   6708 	int offset, iphl;
   6709 	uint8_t fields;
   6710 
   6711 	/*
   6712 	 * XXX It would be nice if the mbuf pkthdr had offset
   6713 	 * fields for the protocol headers.
   6714 	 */
   6715 
   6716 	eh = mtod(m0, struct ether_header *);
   6717 	switch (htons(eh->ether_type)) {
   6718 	case ETHERTYPE_IP:
   6719 	case ETHERTYPE_IPV6:
   6720 		offset = ETHER_HDR_LEN;
   6721 		break;
   6722 
   6723 	case ETHERTYPE_VLAN:
   6724 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6725 		break;
   6726 
   6727 	default:
   6728 		/*
   6729 		 * Don't support this protocol or encapsulation.
   6730 		 */
   6731 		*fieldsp = 0;
   6732 		*cmdp = 0;
   6733 		return 0;
   6734 	}
   6735 
   6736 	if ((m0->m_pkthdr.csum_flags &
   6737 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6738 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6739 	} else {
   6740 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6741 	}
   6742 	ipcse = offset + iphl - 1;
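
	/*
	 * Example (illustrative): for an untagged IPv4 packet,
	 * offset = ETHER_HDR_LEN = 14 and a minimal IP header gives
	 * iphl = 20, so ipcse = 14 + 20 - 1 = 33, the offset of the
	 * last byte of the IP header, as the IPCSE field expects.
	 */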
   6743 
   6744 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6745 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6746 	seg = 0;
   6747 	fields = 0;
   6748 
   6749 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6750 		int hlen = offset + iphl;
   6751 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6752 
   6753 		if (__predict_false(m0->m_len <
   6754 				    (hlen + sizeof(struct tcphdr)))) {
   6755 			/*
   6756 			 * TCP/IP headers are not in the first mbuf; we need
   6757 			 * to do this the slow and painful way.  Let's just
   6758 			 * hope this doesn't happen very often.
   6759 			 */
   6760 			struct tcphdr th;
   6761 
   6762 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6763 
   6764 			m_copydata(m0, hlen, sizeof(th), &th);
   6765 			if (v4) {
   6766 				struct ip ip;
   6767 
   6768 				m_copydata(m0, offset, sizeof(ip), &ip);
   6769 				ip.ip_len = 0;
   6770 				m_copyback(m0,
   6771 				    offset + offsetof(struct ip, ip_len),
   6772 				    sizeof(ip.ip_len), &ip.ip_len);
   6773 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6774 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6775 			} else {
   6776 				struct ip6_hdr ip6;
   6777 
   6778 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6779 				ip6.ip6_plen = 0;
   6780 				m_copyback(m0,
   6781 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6782 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6783 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6784 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6785 			}
   6786 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6787 			    sizeof(th.th_sum), &th.th_sum);
   6788 
   6789 			hlen += th.th_off << 2;
   6790 		} else {
   6791 			/*
   6792 			 * TCP/IP headers are in the first mbuf; we can do
   6793 			 * this the easy way.
   6794 			 */
   6795 			struct tcphdr *th;
   6796 
   6797 			if (v4) {
   6798 				struct ip *ip =
   6799 				    (void *)(mtod(m0, char *) + offset);
   6800 				th = (void *)(mtod(m0, char *) + hlen);
   6801 
   6802 				ip->ip_len = 0;
   6803 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6804 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6805 			} else {
   6806 				struct ip6_hdr *ip6 =
   6807 				    (void *)(mtod(m0, char *) + offset);
   6808 				th = (void *)(mtod(m0, char *) + hlen);
   6809 
   6810 				ip6->ip6_plen = 0;
   6811 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6812 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6813 			}
   6814 			hlen += th->th_off << 2;
   6815 		}
   6816 
   6817 		if (v4) {
   6818 			WM_Q_EVCNT_INCR(txq, txtso);
   6819 			cmdlen |= WTX_TCPIP_CMD_IP;
   6820 		} else {
   6821 			WM_Q_EVCNT_INCR(txq, txtso6);
   6822 			ipcse = 0;
   6823 		}
   6824 		cmd |= WTX_TCPIP_CMD_TSE;
   6825 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6826 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6827 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6828 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6829 	}
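
	/*
	 * TSO example (illustrative): an IPv4 TCP segment with 14-byte
	 * Ethernet, 20-byte IP and 20-byte TCP headers has hlen = 54,
	 * so HDRLEN is 54 and the payload length carried in cmdlen is
	 * m0->m_pkthdr.len - 54.  With an MSS of 1460 the chip then
	 * carves that payload into 1460-byte frames, replicating and
	 * fixing up the headers for each one.
	 */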
   6830 
   6831 	/*
   6832 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6833 	 * offload feature, if we load the context descriptor, we
   6834 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6835 	 */
   6836 
   6837 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6838 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6839 	    WTX_TCPIP_IPCSE(ipcse);
   6840 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6841 		WM_Q_EVCNT_INCR(txq, txipsum);
   6842 		fields |= WTX_IXSM;
   6843 	}
   6844 
   6845 	offset += iphl;
   6846 
   6847 	if (m0->m_pkthdr.csum_flags &
   6848 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6849 		WM_Q_EVCNT_INCR(txq, txtusum);
   6850 		fields |= WTX_TXSM;
   6851 		tucs = WTX_TCPIP_TUCSS(offset) |
   6852 		    WTX_TCPIP_TUCSO(offset +
   6853 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6854 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6855 	} else if ((m0->m_pkthdr.csum_flags &
   6856 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6857 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6858 		fields |= WTX_TXSM;
   6859 		tucs = WTX_TCPIP_TUCSS(offset) |
   6860 		    WTX_TCPIP_TUCSO(offset +
   6861 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6862 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6863 	} else {
   6864 		/* Just initialize it to a valid TCP context. */
   6865 		tucs = WTX_TCPIP_TUCSS(offset) |
   6866 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6867 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6868 	}
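
	/*
	 * Example (illustrative): for IPv4 TCP, offset is now
	 * 14 + 20 = 34, so TUCSS = 34 and TUCSO = 34 +
	 * offsetof(struct tcphdr, th_sum) = 34 + 16 = 50, the byte at
	 * which the hardware stores the computed TCP checksum.
	 */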
   6869 
   6870 	/*
    6871 	 * We don't have to write a context descriptor for every packet,
    6872 	 * except on the 82574, which requires one for every packet when
    6873 	 * two descriptor queues are used.
    6874 	 * Writing a context descriptor for every packet adds overhead,
    6875 	 * but it does not cause problems.
   6876 	 */
   6877 	/* Fill in the context descriptor. */
   6878 	t = (struct livengood_tcpip_ctxdesc *)
   6879 	    &txq->txq_descs[txq->txq_next];
   6880 	t->tcpip_ipcs = htole32(ipcs);
   6881 	t->tcpip_tucs = htole32(tucs);
   6882 	t->tcpip_cmdlen = htole32(cmdlen);
   6883 	t->tcpip_seg = htole32(seg);
   6884 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6885 
   6886 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6887 	txs->txs_ndesc++;
   6888 
   6889 	*cmdp = cmd;
   6890 	*fieldsp = fields;
   6891 
   6892 	return 0;
   6893 }
   6894 
   6895 static inline int
   6896 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6897 {
   6898 	struct wm_softc *sc = ifp->if_softc;
   6899 	u_int cpuid = cpu_index(curcpu());
   6900 
   6901 	/*
    6902 	 * Currently, a simple distribution strategy.
    6903 	 * TODO:
    6904 	 * Distribute by flowid (RSS hash value).
    6905 	 */
    6906 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6907 }
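
/*
 * Mapping example (illustrative): with ncpu = 8, sc_nqueues = 4 and
 * sc_affinity_offset = 2, a thread running on CPU 5 picks queue
 * (5 + 8 - 2) % 4 = 3.  Adding ncpu before taking the modulus keeps
 * the left-hand side non-negative even when the affinity offset
 * exceeds the current CPU index.
 */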
   6908 
   6909 /*
   6910  * wm_start:		[ifnet interface function]
   6911  *
   6912  *	Start packet transmission on the interface.
   6913  */
   6914 static void
   6915 wm_start(struct ifnet *ifp)
   6916 {
   6917 	struct wm_softc *sc = ifp->if_softc;
   6918 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6919 
   6920 #ifdef WM_MPSAFE
   6921 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6922 #endif
   6923 	/*
   6924 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6925 	 */
   6926 
   6927 	mutex_enter(txq->txq_lock);
   6928 	if (!txq->txq_stopping)
   6929 		wm_start_locked(ifp);
   6930 	mutex_exit(txq->txq_lock);
   6931 }
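
/*
 * Note (illustrative): wm_start() is the single-queue if_start path
 * and always drains ifp->if_snd through queue 0, while wm_transmit()
 * below spreads packets over the per-queue txq_interq pcqs chosen by
 * wm_select_txqueue().
 */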
   6932 
   6933 static void
   6934 wm_start_locked(struct ifnet *ifp)
   6935 {
   6936 	struct wm_softc *sc = ifp->if_softc;
   6937 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6938 
   6939 	wm_send_common_locked(ifp, txq, false);
   6940 }
   6941 
   6942 static int
   6943 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6944 {
   6945 	int qid;
   6946 	struct wm_softc *sc = ifp->if_softc;
   6947 	struct wm_txqueue *txq;
   6948 
   6949 	qid = wm_select_txqueue(ifp, m);
   6950 	txq = &sc->sc_queue[qid].wmq_txq;
   6951 
   6952 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6953 		m_freem(m);
   6954 		WM_Q_EVCNT_INCR(txq, txdrop);
   6955 		return ENOBUFS;
   6956 	}
   6957 
   6958 	/*
   6959 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6960 	 */
   6961 	ifp->if_obytes += m->m_pkthdr.len;
   6962 	if (m->m_flags & M_MCAST)
   6963 		ifp->if_omcasts++;
   6964 
   6965 	if (mutex_tryenter(txq->txq_lock)) {
   6966 		if (!txq->txq_stopping)
   6967 			wm_transmit_locked(ifp, txq);
   6968 		mutex_exit(txq->txq_lock);
   6969 	}
   6970 
   6971 	return 0;
   6972 }
   6973 
   6974 static void
   6975 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6976 {
   6977 
   6978 	wm_send_common_locked(ifp, txq, true);
   6979 }
   6980 
   6981 static void
   6982 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6983     bool is_transmit)
   6984 {
   6985 	struct wm_softc *sc = ifp->if_softc;
   6986 	struct mbuf *m0;
   6987 	struct m_tag *mtag;
   6988 	struct wm_txsoft *txs;
   6989 	bus_dmamap_t dmamap;
   6990 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6991 	bus_addr_t curaddr;
   6992 	bus_size_t seglen, curlen;
   6993 	uint32_t cksumcmd;
   6994 	uint8_t cksumfields;
   6995 
   6996 	KASSERT(mutex_owned(txq->txq_lock));
   6997 
   6998 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6999 		return;
   7000 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7001 		return;
   7002 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7003 		return;
   7004 
   7005 	/* Remember the previous number of free descriptors. */
   7006 	ofree = txq->txq_free;
   7007 
   7008 	/*
   7009 	 * Loop through the send queue, setting up transmit descriptors
   7010 	 * until we drain the queue, or use up all available transmit
   7011 	 * descriptors.
   7012 	 */
   7013 	for (;;) {
   7014 		m0 = NULL;
   7015 
   7016 		/* Get a work queue entry. */
   7017 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7018 			wm_txeof(sc, txq);
   7019 			if (txq->txq_sfree == 0) {
   7020 				DPRINTF(WM_DEBUG_TX,
   7021 				    ("%s: TX: no free job descriptors\n",
   7022 					device_xname(sc->sc_dev)));
   7023 				WM_Q_EVCNT_INCR(txq, txsstall);
   7024 				break;
   7025 			}
   7026 		}
   7027 
   7028 		/* Grab a packet off the queue. */
   7029 		if (is_transmit)
   7030 			m0 = pcq_get(txq->txq_interq);
   7031 		else
   7032 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7033 		if (m0 == NULL)
   7034 			break;
   7035 
   7036 		DPRINTF(WM_DEBUG_TX,
   7037 		    ("%s: TX: have packet to transmit: %p\n",
   7038 		    device_xname(sc->sc_dev), m0));
   7039 
   7040 		txs = &txq->txq_soft[txq->txq_snext];
   7041 		dmamap = txs->txs_dmamap;
   7042 
   7043 		use_tso = (m0->m_pkthdr.csum_flags &
   7044 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7045 
   7046 		/*
   7047 		 * So says the Linux driver:
   7048 		 * The controller does a simple calculation to make sure
   7049 		 * there is enough room in the FIFO before initiating the
   7050 		 * DMA for each buffer.  The calc is:
   7051 		 *	4 = ceil(buffer len / MSS)
   7052 		 * To make sure we don't overrun the FIFO, adjust the max
   7053 		 * buffer len if the MSS drops.
   7054 		 */
   7055 		dmamap->dm_maxsegsz =
   7056 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7057 		    ? m0->m_pkthdr.segsz << 2
   7058 		    : WTX_MAX_LEN;
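
		/*
		 * Worked example (illustrative): with an MSS of 1448,
		 * segsz << 2 = 5792, so each DMA segment is capped at
		 * 5792 bytes and ceil(buffer len / MSS) stays at or
		 * below 4 per buffer; for larger MSS values the cap
		 * falls back to WTX_MAX_LEN.
		 */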
   7059 
   7060 		/*
   7061 		 * Load the DMA map.  If this fails, the packet either
   7062 		 * didn't fit in the allotted number of segments, or we
   7063 		 * were short on resources.  For the too-many-segments
   7064 		 * case, we simply report an error and drop the packet,
   7065 		 * since we can't sanely copy a jumbo packet to a single
   7066 		 * buffer.
   7067 		 */
   7068 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7069 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7070 		if (error) {
   7071 			if (error == EFBIG) {
   7072 				WM_Q_EVCNT_INCR(txq, txdrop);
   7073 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7074 				    "DMA segments, dropping...\n",
   7075 				    device_xname(sc->sc_dev));
   7076 				wm_dump_mbuf_chain(sc, m0);
   7077 				m_freem(m0);
   7078 				continue;
   7079 			}
    7080 			/* Short on resources, just stop for now. */
   7081 			DPRINTF(WM_DEBUG_TX,
   7082 			    ("%s: TX: dmamap load failed: %d\n",
   7083 			    device_xname(sc->sc_dev), error));
   7084 			break;
   7085 		}
   7086 
   7087 		segs_needed = dmamap->dm_nsegs;
   7088 		if (use_tso) {
   7089 			/* For sentinel descriptor; see below. */
   7090 			segs_needed++;
   7091 		}
   7092 
   7093 		/*
   7094 		 * Ensure we have enough descriptors free to describe
   7095 		 * the packet.  Note, we always reserve one descriptor
   7096 		 * at the end of the ring due to the semantics of the
   7097 		 * TDT register, plus one more in the event we need
   7098 		 * to load offload context.
   7099 		 */
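		/*
		 * E.g. (illustrative): a 3-segment packet needs
		 * segs_needed = 3, plus the reserved TDT slot and a
		 * possible context descriptor, so it is only sent when
		 * txq_free is at least 5.
		 */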
   7100 		if (segs_needed > txq->txq_free - 2) {
   7101 			/*
   7102 			 * Not enough free descriptors to transmit this
   7103 			 * packet.  We haven't committed anything yet,
   7104 			 * so just unload the DMA map, put the packet
    7105 			 * back on the queue, and punt.  Notify the upper
   7106 			 * layer that there are no more slots left.
   7107 			 */
   7108 			DPRINTF(WM_DEBUG_TX,
   7109 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7110 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7111 			    segs_needed, txq->txq_free - 1));
   7112 			if (!is_transmit)
   7113 				ifp->if_flags |= IFF_OACTIVE;
   7114 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7115 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7116 			WM_Q_EVCNT_INCR(txq, txdstall);
   7117 			break;
   7118 		}
   7119 
   7120 		/*
   7121 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7122 		 * once we know we can transmit the packet, since we
   7123 		 * do some internal FIFO space accounting here.
   7124 		 */
   7125 		if (sc->sc_type == WM_T_82547 &&
   7126 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7127 			DPRINTF(WM_DEBUG_TX,
   7128 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7129 			    device_xname(sc->sc_dev)));
   7130 			if (!is_transmit)
   7131 				ifp->if_flags |= IFF_OACTIVE;
   7132 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7133 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7134 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7135 			break;
   7136 		}
   7137 
   7138 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7139 
   7140 		DPRINTF(WM_DEBUG_TX,
   7141 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7142 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7143 
   7144 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7145 
   7146 		/*
   7147 		 * Store a pointer to the packet so that we can free it
   7148 		 * later.
   7149 		 *
   7150 		 * Initially, we consider the number of descriptors the
    7151 		 * packet uses to be the number of DMA segments.  This may be
   7152 		 * incremented by 1 if we do checksum offload (a descriptor
   7153 		 * is used to set the checksum context).
   7154 		 */
   7155 		txs->txs_mbuf = m0;
   7156 		txs->txs_firstdesc = txq->txq_next;
   7157 		txs->txs_ndesc = segs_needed;
   7158 
   7159 		/* Set up offload parameters for this packet. */
   7160 		if (m0->m_pkthdr.csum_flags &
   7161 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7162 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7163 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7164 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7165 					  &cksumfields) != 0) {
   7166 				/* Error message already displayed. */
   7167 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7168 				continue;
   7169 			}
   7170 		} else {
   7171 			cksumcmd = 0;
   7172 			cksumfields = 0;
   7173 		}
   7174 
   7175 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7176 
   7177 		/* Sync the DMA map. */
   7178 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7179 		    BUS_DMASYNC_PREWRITE);
   7180 
   7181 		/* Initialize the transmit descriptor. */
   7182 		for (nexttx = txq->txq_next, seg = 0;
   7183 		     seg < dmamap->dm_nsegs; seg++) {
   7184 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7185 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7186 			     seglen != 0;
   7187 			     curaddr += curlen, seglen -= curlen,
   7188 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7189 				curlen = seglen;
   7190 
   7191 				/*
   7192 				 * So says the Linux driver:
   7193 				 * Work around for premature descriptor
   7194 				 * write-backs in TSO mode.  Append a
   7195 				 * 4-byte sentinel descriptor.
   7196 				 */
   7197 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7198 				    curlen > 8)
   7199 					curlen -= 4;
   7200 
   7201 				wm_set_dma_addr(
   7202 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7203 				txq->txq_descs[nexttx].wtx_cmdlen
   7204 				    = htole32(cksumcmd | curlen);
   7205 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7206 				    = 0;
   7207 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7208 				    = cksumfields;
    7209 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7210 				lasttx = nexttx;
   7211 
   7212 				DPRINTF(WM_DEBUG_TX,
   7213 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7214 				     "len %#04zx\n",
   7215 				    device_xname(sc->sc_dev), nexttx,
   7216 				    (uint64_t)curaddr, curlen));
   7217 			}
   7218 		}
   7219 
   7220 		KASSERT(lasttx != -1);
   7221 
   7222 		/*
   7223 		 * Set up the command byte on the last descriptor of
   7224 		 * the packet.  If we're in the interrupt delay window,
   7225 		 * delay the interrupt.
   7226 		 */
   7227 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7228 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7229 
   7230 		/*
   7231 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7232 		 * up the descriptor to encapsulate the packet for us.
   7233 		 *
   7234 		 * This is only valid on the last descriptor of the packet.
   7235 		 */
   7236 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7237 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7238 			    htole32(WTX_CMD_VLE);
   7239 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7240 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7241 		}
   7242 
   7243 		txs->txs_lastdesc = lasttx;
   7244 
   7245 		DPRINTF(WM_DEBUG_TX,
   7246 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7247 		    device_xname(sc->sc_dev),
   7248 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7249 
   7250 		/* Sync the descriptors we're using. */
   7251 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7252 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7253 
   7254 		/* Give the packet to the chip. */
   7255 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7256 
   7257 		DPRINTF(WM_DEBUG_TX,
   7258 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7259 
   7260 		DPRINTF(WM_DEBUG_TX,
   7261 		    ("%s: TX: finished transmitting packet, job %d\n",
   7262 		    device_xname(sc->sc_dev), txq->txq_snext));
   7263 
   7264 		/* Advance the tx pointer. */
   7265 		txq->txq_free -= txs->txs_ndesc;
   7266 		txq->txq_next = nexttx;
   7267 
   7268 		txq->txq_sfree--;
   7269 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7270 
   7271 		/* Pass the packet to any BPF listeners. */
   7272 		bpf_mtap(ifp, m0);
   7273 	}
   7274 
   7275 	if (m0 != NULL) {
   7276 		if (!is_transmit)
   7277 			ifp->if_flags |= IFF_OACTIVE;
   7278 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7279 		WM_Q_EVCNT_INCR(txq, txdrop);
   7280 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7281 			__func__));
   7282 		m_freem(m0);
   7283 	}
   7284 
   7285 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7286 		/* No more slots; notify upper layer. */
   7287 		if (!is_transmit)
   7288 			ifp->if_flags |= IFF_OACTIVE;
   7289 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7290 	}
   7291 
   7292 	if (txq->txq_free != ofree) {
   7293 		/* Set a watchdog timer in case the chip flakes out. */
   7294 		ifp->if_timer = 5;
   7295 	}
   7296 }
   7297 
   7298 /*
   7299  * wm_nq_tx_offload:
   7300  *
   7301  *	Set up TCP/IP checksumming parameters for the
   7302  *	specified packet, for NEWQUEUE devices
   7303  */
   7304 static int
   7305 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7306     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7307 {
   7308 	struct mbuf *m0 = txs->txs_mbuf;
   7309 	struct m_tag *mtag;
   7310 	uint32_t vl_len, mssidx, cmdc;
   7311 	struct ether_header *eh;
   7312 	int offset, iphl;
   7313 
   7314 	/*
   7315 	 * XXX It would be nice if the mbuf pkthdr had offset
   7316 	 * fields for the protocol headers.
   7317 	 */
   7318 	*cmdlenp = 0;
   7319 	*fieldsp = 0;
   7320 
   7321 	eh = mtod(m0, struct ether_header *);
   7322 	switch (htons(eh->ether_type)) {
   7323 	case ETHERTYPE_IP:
   7324 	case ETHERTYPE_IPV6:
   7325 		offset = ETHER_HDR_LEN;
   7326 		break;
   7327 
   7328 	case ETHERTYPE_VLAN:
   7329 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7330 		break;
   7331 
   7332 	default:
   7333 		/* Don't support this protocol or encapsulation. */
   7334 		*do_csum = false;
   7335 		return 0;
   7336 	}
   7337 	*do_csum = true;
   7338 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7339 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7340 
   7341 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7342 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7343 
   7344 	if ((m0->m_pkthdr.csum_flags &
   7345 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7346 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7347 	} else {
   7348 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7349 	}
   7350 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7351 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
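
	/*
	 * Example (illustrative): an untagged IPv4 packet packs
	 * vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
	 *     (20 << NQTXC_VLLEN_IPLEN_SHIFT),
	 * i.e. a 14-byte MAC header and a 20-byte IP header, with the
	 * VLAN field left zero.
	 */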
   7352 
   7353 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7354 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7355 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7356 		*cmdlenp |= NQTX_CMD_VLE;
   7357 	}
   7358 
   7359 	mssidx = 0;
   7360 
   7361 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7362 		int hlen = offset + iphl;
   7363 		int tcp_hlen;
   7364 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7365 
   7366 		if (__predict_false(m0->m_len <
   7367 				    (hlen + sizeof(struct tcphdr)))) {
   7368 			/*
   7369 			 * TCP/IP headers are not in the first mbuf; we need
   7370 			 * to do this the slow and painful way.  Let's just
   7371 			 * hope this doesn't happen very often.
   7372 			 */
   7373 			struct tcphdr th;
   7374 
   7375 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7376 
   7377 			m_copydata(m0, hlen, sizeof(th), &th);
   7378 			if (v4) {
   7379 				struct ip ip;
   7380 
   7381 				m_copydata(m0, offset, sizeof(ip), &ip);
   7382 				ip.ip_len = 0;
   7383 				m_copyback(m0,
   7384 				    offset + offsetof(struct ip, ip_len),
   7385 				    sizeof(ip.ip_len), &ip.ip_len);
   7386 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7387 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7388 			} else {
   7389 				struct ip6_hdr ip6;
   7390 
   7391 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7392 				ip6.ip6_plen = 0;
   7393 				m_copyback(m0,
   7394 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7395 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7396 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7397 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7398 			}
   7399 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7400 			    sizeof(th.th_sum), &th.th_sum);
   7401 
   7402 			tcp_hlen = th.th_off << 2;
   7403 		} else {
   7404 			/*
   7405 			 * TCP/IP headers are in the first mbuf; we can do
   7406 			 * this the easy way.
   7407 			 */
   7408 			struct tcphdr *th;
   7409 
   7410 			if (v4) {
   7411 				struct ip *ip =
   7412 				    (void *)(mtod(m0, char *) + offset);
   7413 				th = (void *)(mtod(m0, char *) + hlen);
   7414 
   7415 				ip->ip_len = 0;
   7416 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7417 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7418 			} else {
   7419 				struct ip6_hdr *ip6 =
   7420 				    (void *)(mtod(m0, char *) + offset);
   7421 				th = (void *)(mtod(m0, char *) + hlen);
   7422 
   7423 				ip6->ip6_plen = 0;
   7424 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7425 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7426 			}
   7427 			tcp_hlen = th->th_off << 2;
   7428 		}
   7429 		hlen += tcp_hlen;
   7430 		*cmdlenp |= NQTX_CMD_TSE;
   7431 
   7432 		if (v4) {
   7433 			WM_Q_EVCNT_INCR(txq, txtso);
   7434 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7435 		} else {
   7436 			WM_Q_EVCNT_INCR(txq, txtso6);
   7437 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7438 		}
   7439 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7440 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7441 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7442 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7443 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7444 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
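		/*
		 * Example (illustrative): an MSS of 1460 and a 20-byte
		 * TCP header yield mssidx =
		 *     (1460 << NQTXC_MSSIDX_MSS_SHIFT) |
		 *     (20 << NQTXC_MSSIDX_L4LEN_SHIFT),
		 * which tells the chip how to segment the TSO payload.
		 */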
   7445 	} else {
   7446 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7447 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7448 	}
   7449 
   7450 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7451 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7452 		cmdc |= NQTXC_CMD_IP4;
   7453 	}
   7454 
   7455 	if (m0->m_pkthdr.csum_flags &
   7456 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7457 		WM_Q_EVCNT_INCR(txq, txtusum);
   7458 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7459 			cmdc |= NQTXC_CMD_TCP;
   7460 		} else {
   7461 			cmdc |= NQTXC_CMD_UDP;
   7462 		}
   7463 		cmdc |= NQTXC_CMD_IP4;
   7464 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7465 	}
   7466 	if (m0->m_pkthdr.csum_flags &
   7467 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7468 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7469 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7470 			cmdc |= NQTXC_CMD_TCP;
   7471 		} else {
   7472 			cmdc |= NQTXC_CMD_UDP;
   7473 		}
   7474 		cmdc |= NQTXC_CMD_IP6;
   7475 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7476 	}
   7477 
   7478 	/*
    7479 	 * We don't have to write a context descriptor for every packet on
    7480 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7481 	 * I210 and I211. Writing one per Tx queue is enough for these
    7482 	 * controllers.
    7483 	 * Writing a context descriptor for every packet adds overhead,
    7484 	 * but it does not cause problems.
   7485 	 */
   7486 	/* Fill in the context descriptor. */
   7487 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7488 	    htole32(vl_len);
   7489 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7490 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7491 	    htole32(cmdc);
   7492 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7493 	    htole32(mssidx);
   7494 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7495 	DPRINTF(WM_DEBUG_TX,
   7496 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7497 	    txq->txq_next, 0, vl_len));
   7498 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7499 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7500 	txs->txs_ndesc++;
   7501 	return 0;
   7502 }
   7503 
   7504 /*
   7505  * wm_nq_start:		[ifnet interface function]
   7506  *
   7507  *	Start packet transmission on the interface for NEWQUEUE devices
   7508  */
   7509 static void
   7510 wm_nq_start(struct ifnet *ifp)
   7511 {
   7512 	struct wm_softc *sc = ifp->if_softc;
   7513 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7514 
   7515 #ifdef WM_MPSAFE
   7516 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7517 #endif
   7518 	/*
   7519 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7520 	 */
   7521 
   7522 	mutex_enter(txq->txq_lock);
   7523 	if (!txq->txq_stopping)
   7524 		wm_nq_start_locked(ifp);
   7525 	mutex_exit(txq->txq_lock);
   7526 }
   7527 
   7528 static void
   7529 wm_nq_start_locked(struct ifnet *ifp)
   7530 {
   7531 	struct wm_softc *sc = ifp->if_softc;
   7532 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7533 
   7534 	wm_nq_send_common_locked(ifp, txq, false);
   7535 }
   7536 
   7537 static int
   7538 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7539 {
   7540 	int qid;
   7541 	struct wm_softc *sc = ifp->if_softc;
   7542 	struct wm_txqueue *txq;
   7543 
   7544 	qid = wm_select_txqueue(ifp, m);
   7545 	txq = &sc->sc_queue[qid].wmq_txq;
   7546 
   7547 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7548 		m_freem(m);
   7549 		WM_Q_EVCNT_INCR(txq, txdrop);
   7550 		return ENOBUFS;
   7551 	}
   7552 
   7553 	/*
   7554 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7555 	 */
   7556 	ifp->if_obytes += m->m_pkthdr.len;
   7557 	if (m->m_flags & M_MCAST)
   7558 		ifp->if_omcasts++;
   7559 
   7560 	/*
    7561 	 * There are two situations in which this mutex_tryenter() can
    7562 	 * fail at run time:
    7563 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7564 	 *     (2) contention with the deferred if_start softint
    7565 	 *         (wm_handle_queue())
    7566 	 * In either case, the last packet enqueued to txq->txq_interq is
    7567 	 * dequeued later by wm_deferred_start_locked(), so the packet
    7568 	 * does not get stuck.
   7569 	 */
   7570 	if (mutex_tryenter(txq->txq_lock)) {
   7571 		if (!txq->txq_stopping)
   7572 			wm_nq_transmit_locked(ifp, txq);
   7573 		mutex_exit(txq->txq_lock);
   7574 	}
   7575 
   7576 	return 0;
   7577 }
   7578 
   7579 static void
   7580 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7581 {
   7582 
   7583 	wm_nq_send_common_locked(ifp, txq, true);
   7584 }
   7585 
   7586 static void
   7587 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7588     bool is_transmit)
   7589 {
   7590 	struct wm_softc *sc = ifp->if_softc;
   7591 	struct mbuf *m0;
   7592 	struct m_tag *mtag;
   7593 	struct wm_txsoft *txs;
   7594 	bus_dmamap_t dmamap;
   7595 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7596 	bool do_csum, sent;
   7597 
   7598 	KASSERT(mutex_owned(txq->txq_lock));
   7599 
   7600 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7601 		return;
   7602 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7603 		return;
   7604 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7605 		return;
   7606 
   7607 	sent = false;
   7608 
   7609 	/*
   7610 	 * Loop through the send queue, setting up transmit descriptors
   7611 	 * until we drain the queue, or use up all available transmit
   7612 	 * descriptors.
   7613 	 */
   7614 	for (;;) {
   7615 		m0 = NULL;
   7616 
   7617 		/* Get a work queue entry. */
   7618 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7619 			wm_txeof(sc, txq);
   7620 			if (txq->txq_sfree == 0) {
   7621 				DPRINTF(WM_DEBUG_TX,
   7622 				    ("%s: TX: no free job descriptors\n",
   7623 					device_xname(sc->sc_dev)));
   7624 				WM_Q_EVCNT_INCR(txq, txsstall);
   7625 				break;
   7626 			}
   7627 		}
   7628 
   7629 		/* Grab a packet off the queue. */
   7630 		if (is_transmit)
   7631 			m0 = pcq_get(txq->txq_interq);
   7632 		else
   7633 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7634 		if (m0 == NULL)
   7635 			break;
   7636 
   7637 		DPRINTF(WM_DEBUG_TX,
   7638 		    ("%s: TX: have packet to transmit: %p\n",
   7639 		    device_xname(sc->sc_dev), m0));
   7640 
   7641 		txs = &txq->txq_soft[txq->txq_snext];
   7642 		dmamap = txs->txs_dmamap;
   7643 
   7644 		/*
   7645 		 * Load the DMA map.  If this fails, the packet either
   7646 		 * didn't fit in the allotted number of segments, or we
   7647 		 * were short on resources.  For the too-many-segments
   7648 		 * case, we simply report an error and drop the packet,
   7649 		 * since we can't sanely copy a jumbo packet to a single
   7650 		 * buffer.
   7651 		 */
   7652 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7653 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7654 		if (error) {
   7655 			if (error == EFBIG) {
   7656 				WM_Q_EVCNT_INCR(txq, txdrop);
   7657 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7658 				    "DMA segments, dropping...\n",
   7659 				    device_xname(sc->sc_dev));
   7660 				wm_dump_mbuf_chain(sc, m0);
   7661 				m_freem(m0);
   7662 				continue;
   7663 			}
   7664 			/* Short on resources, just stop for now. */
   7665 			DPRINTF(WM_DEBUG_TX,
   7666 			    ("%s: TX: dmamap load failed: %d\n",
   7667 			    device_xname(sc->sc_dev), error));
   7668 			break;
   7669 		}
   7670 
   7671 		segs_needed = dmamap->dm_nsegs;
   7672 
   7673 		/*
   7674 		 * Ensure we have enough descriptors free to describe
   7675 		 * the packet.  Note, we always reserve one descriptor
   7676 		 * at the end of the ring due to the semantics of the
   7677 		 * TDT register, plus one more in the event we need
   7678 		 * to load offload context.
   7679 		 */
   7680 		if (segs_needed > txq->txq_free - 2) {
   7681 			/*
   7682 			 * Not enough free descriptors to transmit this
   7683 			 * packet.  We haven't committed anything yet,
   7684 			 * so just unload the DMA map, put the packet
    7685 			 * back on the queue, and punt.  Notify the upper
   7686 			 * layer that there are no more slots left.
   7687 			 */
   7688 			DPRINTF(WM_DEBUG_TX,
   7689 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7690 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7691 			    segs_needed, txq->txq_free - 1));
   7692 			if (!is_transmit)
   7693 				ifp->if_flags |= IFF_OACTIVE;
   7694 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7695 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7696 			WM_Q_EVCNT_INCR(txq, txdstall);
   7697 			break;
   7698 		}
   7699 
   7700 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7701 
   7702 		DPRINTF(WM_DEBUG_TX,
   7703 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7704 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7705 
   7706 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7707 
   7708 		/*
   7709 		 * Store a pointer to the packet so that we can free it
   7710 		 * later.
   7711 		 *
   7712 		 * Initially, we consider the number of descriptors the
    7713 		 * packet uses to be the number of DMA segments.  This may be
   7714 		 * incremented by 1 if we do checksum offload (a descriptor
   7715 		 * is used to set the checksum context).
   7716 		 */
   7717 		txs->txs_mbuf = m0;
   7718 		txs->txs_firstdesc = txq->txq_next;
   7719 		txs->txs_ndesc = segs_needed;
   7720 
   7721 		/* Set up offload parameters for this packet. */
   7722 		uint32_t cmdlen, fields, dcmdlen;
   7723 		if (m0->m_pkthdr.csum_flags &
   7724 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7725 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7726 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7727 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7728 			    &do_csum) != 0) {
   7729 				/* Error message already displayed. */
   7730 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7731 				continue;
   7732 			}
   7733 		} else {
   7734 			do_csum = false;
   7735 			cmdlen = 0;
   7736 			fields = 0;
   7737 		}
   7738 
   7739 		/* Sync the DMA map. */
   7740 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7741 		    BUS_DMASYNC_PREWRITE);
   7742 
   7743 		/* Initialize the first transmit descriptor. */
   7744 		nexttx = txq->txq_next;
   7745 		if (!do_csum) {
   7746 			/* setup a legacy descriptor */
   7747 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7748 			    dmamap->dm_segs[0].ds_addr);
   7749 			txq->txq_descs[nexttx].wtx_cmdlen =
   7750 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7751 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7752 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7753 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7754 			    NULL) {
   7755 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7756 				    htole32(WTX_CMD_VLE);
   7757 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7758 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7759 			} else {
    7760 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7761 			}
   7762 			dcmdlen = 0;
   7763 		} else {
   7764 			/* setup an advanced data descriptor */
   7765 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7766 			    htole64(dmamap->dm_segs[0].ds_addr);
   7767 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7768 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7769 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7770 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7771 			    htole32(fields);
   7772 			DPRINTF(WM_DEBUG_TX,
   7773 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7774 			    device_xname(sc->sc_dev), nexttx,
   7775 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7776 			DPRINTF(WM_DEBUG_TX,
   7777 			    ("\t 0x%08x%08x\n", fields,
   7778 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7779 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7780 		}
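
		/*
		 * Note (illustrative): dcmdlen carries the descriptor
		 * type bits for the remaining segments: zero keeps
		 * them in legacy format, while NQTX_DTYP_D |
		 * NQTX_CMD_DEXT marks them as advanced data
		 * descriptors, matching the format chosen for the
		 * first descriptor above.
		 */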
   7781 
   7782 		lasttx = nexttx;
   7783 		nexttx = WM_NEXTTX(txq, nexttx);
   7784 		/*
    7785 		 * Fill in the next descriptors. The legacy and advanced
    7786 		 * formats are the same here.
   7787 		 */
   7788 		for (seg = 1; seg < dmamap->dm_nsegs;
   7789 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7790 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7791 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7792 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7793 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7794 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7795 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7796 			lasttx = nexttx;
   7797 
   7798 			DPRINTF(WM_DEBUG_TX,
   7799 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7800 			     "len %#04zx\n",
   7801 			    device_xname(sc->sc_dev), nexttx,
   7802 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7803 			    dmamap->dm_segs[seg].ds_len));
   7804 		}
   7805 
   7806 		KASSERT(lasttx != -1);
   7807 
   7808 		/*
   7809 		 * Set up the command byte on the last descriptor of
   7810 		 * the packet.  If we're in the interrupt delay window,
   7811 		 * delay the interrupt.
   7812 		 */
   7813 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7814 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7815 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7816 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7817 
   7818 		txs->txs_lastdesc = lasttx;
   7819 
   7820 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7821 		    device_xname(sc->sc_dev),
   7822 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7823 
   7824 		/* Sync the descriptors we're using. */
   7825 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7826 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7827 
   7828 		/* Give the packet to the chip. */
   7829 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7830 		sent = true;
   7831 
   7832 		DPRINTF(WM_DEBUG_TX,
   7833 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7834 
   7835 		DPRINTF(WM_DEBUG_TX,
   7836 		    ("%s: TX: finished transmitting packet, job %d\n",
   7837 		    device_xname(sc->sc_dev), txq->txq_snext));
   7838 
   7839 		/* Advance the tx pointer. */
   7840 		txq->txq_free -= txs->txs_ndesc;
   7841 		txq->txq_next = nexttx;
   7842 
   7843 		txq->txq_sfree--;
   7844 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7845 
   7846 		/* Pass the packet to any BPF listeners. */
   7847 		bpf_mtap(ifp, m0);
   7848 	}
   7849 
   7850 	if (m0 != NULL) {
   7851 		if (!is_transmit)
   7852 			ifp->if_flags |= IFF_OACTIVE;
   7853 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7854 		WM_Q_EVCNT_INCR(txq, txdrop);
   7855 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7856 			__func__));
   7857 		m_freem(m0);
   7858 	}
   7859 
   7860 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7861 		/* No more slots; notify upper layer. */
   7862 		if (!is_transmit)
   7863 			ifp->if_flags |= IFF_OACTIVE;
   7864 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7865 	}
   7866 
   7867 	if (sent) {
   7868 		/* Set a watchdog timer in case the chip flakes out. */
   7869 		ifp->if_timer = 5;
   7870 	}
   7871 }
   7872 
   7873 static void
   7874 wm_deferred_start_locked(struct wm_txqueue *txq)
   7875 {
   7876 	struct wm_softc *sc = txq->txq_sc;
   7877 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7878 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7879 	int qid = wmq->wmq_id;
   7880 
   7881 	KASSERT(mutex_owned(txq->txq_lock));
   7882 
    7883 	/* The caller holds and releases txq_lock, so don't drop it here. */
    7884 	if (txq->txq_stopping)
    7885 		return;
   7887 
   7888 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7889 		/* XXX needed for ALTQ or single-CPU systems */
   7890 		if (qid == 0)
   7891 			wm_nq_start_locked(ifp);
   7892 		wm_nq_transmit_locked(ifp, txq);
   7893 	} else {
    7894 		/* XXX needed for ALTQ or single-CPU systems */
   7895 		if (qid == 0)
   7896 			wm_start_locked(ifp);
   7897 		wm_transmit_locked(ifp, txq);
   7898 	}
   7899 }
   7900 
   7901 /* Interrupt */
   7902 
   7903 /*
   7904  * wm_txeof:
   7905  *
   7906  *	Helper; handle transmit interrupts.
   7907  */
   7908 static int
   7909 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7910 {
   7911 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7912 	struct wm_txsoft *txs;
   7913 	bool processed = false;
   7914 	int count = 0;
   7915 	int i;
   7916 	uint8_t status;
   7917 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7918 
   7919 	KASSERT(mutex_owned(txq->txq_lock));
   7920 
   7921 	if (txq->txq_stopping)
   7922 		return 0;
   7923 
   7924 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7925 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7926 	if (wmq->wmq_id == 0)
   7927 		ifp->if_flags &= ~IFF_OACTIVE;
   7928 
   7929 	/*
   7930 	 * Go through the Tx list and free mbufs for those
   7931 	 * frames which have been transmitted.
   7932 	 */
   7933 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7934 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7935 		txs = &txq->txq_soft[i];
   7936 
   7937 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7938 			device_xname(sc->sc_dev), i));
   7939 
   7940 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7941 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7942 
   7943 		status =
   7944 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7945 		if ((status & WTX_ST_DD) == 0) {
   7946 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7947 			    BUS_DMASYNC_PREREAD);
   7948 			break;
   7949 		}
   7950 
   7951 		processed = true;
   7952 		count++;
   7953 		DPRINTF(WM_DEBUG_TX,
   7954 		    ("%s: TX: job %d done: descs %d..%d\n",
   7955 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7956 		    txs->txs_lastdesc));
   7957 
   7958 		/*
   7959 		 * XXX We should probably be using the statistics
   7960 		 * XXX registers, but I don't know if they exist
   7961 		 * XXX on chips before the i82544.
   7962 		 */
   7963 
   7964 #ifdef WM_EVENT_COUNTERS
   7965 		if (status & WTX_ST_TU)
   7966 			WM_Q_EVCNT_INCR(txq, tu);
   7967 #endif /* WM_EVENT_COUNTERS */
   7968 
   7969 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7970 			ifp->if_oerrors++;
   7971 			if (status & WTX_ST_LC)
   7972 				log(LOG_WARNING, "%s: late collision\n",
   7973 				    device_xname(sc->sc_dev));
   7974 			else if (status & WTX_ST_EC) {
   7975 				ifp->if_collisions += 16;
   7976 				log(LOG_WARNING, "%s: excessive collisions\n",
   7977 				    device_xname(sc->sc_dev));
   7978 			}
   7979 		} else
   7980 			ifp->if_opackets++;
   7981 
   7982 		txq->txq_packets++;
   7983 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7984 
   7985 		txq->txq_free += txs->txs_ndesc;
   7986 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7987 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7988 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7989 		m_freem(txs->txs_mbuf);
   7990 		txs->txs_mbuf = NULL;
   7991 	}
   7992 
   7993 	/* Update the dirty transmit buffer pointer. */
   7994 	txq->txq_sdirty = i;
   7995 	DPRINTF(WM_DEBUG_TX,
   7996 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7997 
   7998 	if (count != 0)
   7999 		rnd_add_uint32(&sc->rnd_source, count);
   8000 
   8001 	/*
   8002 	 * If there are no more pending transmissions, cancel the watchdog
   8003 	 * timer.
   8004 	 */
   8005 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8006 		ifp->if_timer = 0;
   8007 
   8008 	return processed;
   8009 }
   8010 
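/*
 * Rx descriptor accessors.
 *
 * Three Rx descriptor formats are handled: the 82574 uses extended
 * descriptors, devices with WM_F_NEWQUEUE set use NQ (advanced-format)
 * descriptors, and everything else uses legacy descriptors.  These
 * inline helpers hide the format differences from wm_rxeof().
 */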
   8011 static inline uint32_t
   8012 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8013 {
   8014 	struct wm_softc *sc = rxq->rxq_sc;
   8015 
   8016 	if (sc->sc_type == WM_T_82574)
   8017 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8018 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8019 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8020 	else
   8021 		return rxq->rxq_descs[idx].wrx_status;
   8022 }
   8023 
   8024 static inline uint32_t
   8025 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8026 {
   8027 	struct wm_softc *sc = rxq->rxq_sc;
   8028 
   8029 	if (sc->sc_type == WM_T_82574)
   8030 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8031 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8032 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8033 	else
   8034 		return rxq->rxq_descs[idx].wrx_errors;
   8035 }
   8036 
   8037 static inline uint16_t
   8038 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8039 {
   8040 	struct wm_softc *sc = rxq->rxq_sc;
   8041 
   8042 	if (sc->sc_type == WM_T_82574)
   8043 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8044 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8045 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8046 	else
   8047 		return rxq->rxq_descs[idx].wrx_special;
   8048 }
   8049 
   8050 static inline int
   8051 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8052 {
   8053 	struct wm_softc *sc = rxq->rxq_sc;
   8054 
   8055 	if (sc->sc_type == WM_T_82574)
   8056 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8057 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8058 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8059 	else
   8060 		return rxq->rxq_descs[idx].wrx_len;
   8061 }
   8062 
   8063 #ifdef WM_DEBUG
   8064 static inline uint32_t
   8065 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8066 {
   8067 	struct wm_softc *sc = rxq->rxq_sc;
   8068 
   8069 	if (sc->sc_type == WM_T_82574)
   8070 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8071 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8072 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8073 	else
   8074 		return 0;
   8075 }
   8076 
   8077 static inline uint8_t
   8078 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8079 {
   8080 	struct wm_softc *sc = rxq->rxq_sc;
   8081 
   8082 	if (sc->sc_type == WM_T_82574)
   8083 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8084 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8085 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8086 	else
   8087 		return 0;
   8088 }
   8089 #endif /* WM_DEBUG */
   8090 
   8091 static inline bool
   8092 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8093     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8094 {
   8095 
   8096 	if (sc->sc_type == WM_T_82574)
   8097 		return (status & ext_bit) != 0;
   8098 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8099 		return (status & nq_bit) != 0;
   8100 	else
   8101 		return (status & legacy_bit) != 0;
   8102 }
   8103 
   8104 static inline bool
   8105 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8106     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8107 {
   8108 
   8109 	if (sc->sc_type == WM_T_82574)
   8110 		return (error & ext_bit) != 0;
   8111 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8112 		return (error & nq_bit) != 0;
   8113 	else
   8114 		return (error & legacy_bit) != 0;
   8115 }
   8116 
   8117 static inline bool
   8118 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8119 {
   8120 
   8121 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8122 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8123 		return true;
   8124 	else
   8125 		return false;
   8126 }
   8127 
   8128 static inline bool
   8129 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8130 {
   8131 	struct wm_softc *sc = rxq->rxq_sc;
   8132 
    8133 	/* XXX missing error bit for newqueue? */
   8134 	if (wm_rxdesc_is_set_error(sc, errors,
   8135 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8136 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8137 		NQRXC_ERROR_RXE)) {
   8138 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8139 			log(LOG_WARNING, "%s: symbol error\n",
   8140 			    device_xname(sc->sc_dev));
   8141 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8142 			log(LOG_WARNING, "%s: receive sequence error\n",
   8143 			    device_xname(sc->sc_dev));
   8144 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8145 			log(LOG_WARNING, "%s: CRC error\n",
   8146 			    device_xname(sc->sc_dev));
   8147 		return true;
   8148 	}
   8149 
   8150 	return false;
   8151 }
   8152 
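/*
 * Check the Descriptor Done (DD) bit.  The hardware sets DD once it has
 * written the descriptor back, so a clear bit means we have caught up
 * with the hardware.
 */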
   8153 static inline bool
   8154 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8155 {
   8156 	struct wm_softc *sc = rxq->rxq_sc;
   8157 
   8158 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8159 		NQRXC_STATUS_DD)) {
   8160 		/* We have processed all of the receive descriptors. */
   8161 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8162 		return false;
   8163 	}
   8164 
   8165 	return true;
   8166 }
   8167 
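/*
 * If the descriptor's VP (VLAN packet) bit is set, attach the VLAN tag
 * to the mbuf.  Returns false when the tag cannot be attached, in which
 * case the caller should drop the packet.
 */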
   8168 static inline bool
   8169 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8170     struct mbuf *m)
   8171 {
   8172 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8173 
   8174 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8175 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8176 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8177 	}
   8178 
   8179 	return true;
   8180 }
   8181 
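/*
 * Translate the hardware checksum status and error bits into mbuf
 * M_CSUM_* flags.  If IXSM is set, the hardware did not check the
 * checksums and all flags are left clear.
 */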
   8182 static inline void
   8183 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8184     uint32_t errors, struct mbuf *m)
   8185 {
   8186 	struct wm_softc *sc = rxq->rxq_sc;
   8187 
   8188 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8189 		if (wm_rxdesc_is_set_status(sc, status,
   8190 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8191 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8192 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8193 			if (wm_rxdesc_is_set_error(sc, errors,
   8194 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8195 				m->m_pkthdr.csum_flags |=
   8196 					M_CSUM_IPv4_BAD;
   8197 		}
   8198 		if (wm_rxdesc_is_set_status(sc, status,
   8199 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8200 			/*
   8201 			 * Note: we don't know if this was TCP or UDP,
   8202 			 * so we just set both bits, and expect the
   8203 			 * upper layers to deal.
   8204 			 */
   8205 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8206 			m->m_pkthdr.csum_flags |=
   8207 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8208 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8209 			if (wm_rxdesc_is_set_error(sc, errors,
   8210 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8211 				m->m_pkthdr.csum_flags |=
   8212 					M_CSUM_TCP_UDP_BAD;
   8213 		}
   8214 	}
   8215 }
   8216 
   8217 /*
   8218  * wm_rxeof:
   8219  *
   8220  *	Helper; handle receive interrupts.
   8221  */
   8222 static void
   8223 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8224 {
   8225 	struct wm_softc *sc = rxq->rxq_sc;
   8226 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8227 	struct wm_rxsoft *rxs;
   8228 	struct mbuf *m;
   8229 	int i, len;
   8230 	int count = 0;
   8231 	uint32_t status, errors;
   8232 	uint16_t vlantag;
   8233 
   8234 	KASSERT(mutex_owned(rxq->rxq_lock));
   8235 
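	/*
	 * Scan the ring from rxq_ptr, processing at most "limit"
	 * descriptors per call so a steady stream of received packets
	 * cannot monopolize the CPU.
	 */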
   8236 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8237 		if (limit-- == 0) {
   8238 			rxq->rxq_ptr = i;
   8239 			break;
   8240 		}
   8241 
   8242 		rxs = &rxq->rxq_soft[i];
   8243 
   8244 		DPRINTF(WM_DEBUG_RX,
   8245 		    ("%s: RX: checking descriptor %d\n",
   8246 		    device_xname(sc->sc_dev), i));
    8247 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8248 
   8249 		status = wm_rxdesc_get_status(rxq, i);
   8250 		errors = wm_rxdesc_get_errors(rxq, i);
   8251 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8252 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8253 #ifdef WM_DEBUG
   8254 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8255 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8256 #endif
   8257 
   8258 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8259 			/*
    8260 			 * Update the receive pointer while holding rxq_lock,
    8261 			 * keeping it consistent with the counters.
    8262 			 */
   8263 			rxq->rxq_ptr = i;
   8264 			break;
   8265 		}
   8266 
   8267 		count++;
   8268 		if (__predict_false(rxq->rxq_discard)) {
   8269 			DPRINTF(WM_DEBUG_RX,
   8270 			    ("%s: RX: discarding contents of descriptor %d\n",
   8271 			    device_xname(sc->sc_dev), i));
   8272 			wm_init_rxdesc(rxq, i);
   8273 			if (wm_rxdesc_is_eop(rxq, status)) {
   8274 				/* Reset our state. */
   8275 				DPRINTF(WM_DEBUG_RX,
   8276 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8277 				    device_xname(sc->sc_dev)));
   8278 				rxq->rxq_discard = 0;
   8279 			}
   8280 			continue;
   8281 		}
   8282 
   8283 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8284 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8285 
   8286 		m = rxs->rxs_mbuf;
   8287 
   8288 		/*
   8289 		 * Add a new receive buffer to the ring, unless of
   8290 		 * course the length is zero. Treat the latter as a
   8291 		 * failed mapping.
   8292 		 */
   8293 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8294 			/*
   8295 			 * Failed, throw away what we've done so
   8296 			 * far, and discard the rest of the packet.
   8297 			 */
   8298 			ifp->if_ierrors++;
   8299 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8300 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8301 			wm_init_rxdesc(rxq, i);
   8302 			if (!wm_rxdesc_is_eop(rxq, status))
   8303 				rxq->rxq_discard = 1;
   8304 			if (rxq->rxq_head != NULL)
   8305 				m_freem(rxq->rxq_head);
   8306 			WM_RXCHAIN_RESET(rxq);
   8307 			DPRINTF(WM_DEBUG_RX,
   8308 			    ("%s: RX: Rx buffer allocation failed, "
   8309 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8310 			    rxq->rxq_discard ? " (discard)" : ""));
   8311 			continue;
   8312 		}
   8313 
   8314 		m->m_len = len;
   8315 		rxq->rxq_len += len;
   8316 		DPRINTF(WM_DEBUG_RX,
   8317 		    ("%s: RX: buffer at %p len %d\n",
   8318 		    device_xname(sc->sc_dev), m->m_data, len));
   8319 
   8320 		/* If this is not the end of the packet, keep looking. */
   8321 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8322 			WM_RXCHAIN_LINK(rxq, m);
   8323 			DPRINTF(WM_DEBUG_RX,
   8324 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8325 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8326 			continue;
   8327 		}
   8328 
    8329 		/*
    8330 		 * Okay, we have the entire packet now.  The chip is
    8331 		 * configured to include the FCS except on I350/I354 and
    8332 		 * I21[01] (not all chips can be configured to strip it),
    8333 		 * so we need to trim it.  We may need to adjust the
    8334 		 * length of the previous mbuf in the chain if the
    8335 		 * current mbuf is too short.
    8336 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8337 		 * register is always set on I350, so we don't trim there.
    8338 		 */
   8339 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8340 		    && (sc->sc_type != WM_T_I210)
   8341 		    && (sc->sc_type != WM_T_I211)) {
   8342 			if (m->m_len < ETHER_CRC_LEN) {
   8343 				rxq->rxq_tail->m_len
   8344 				    -= (ETHER_CRC_LEN - m->m_len);
   8345 				m->m_len = 0;
   8346 			} else
   8347 				m->m_len -= ETHER_CRC_LEN;
   8348 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8349 		} else
   8350 			len = rxq->rxq_len;
   8351 
   8352 		WM_RXCHAIN_LINK(rxq, m);
   8353 
   8354 		*rxq->rxq_tailp = NULL;
   8355 		m = rxq->rxq_head;
   8356 
   8357 		WM_RXCHAIN_RESET(rxq);
   8358 
   8359 		DPRINTF(WM_DEBUG_RX,
   8360 		    ("%s: RX: have entire packet, len -> %d\n",
   8361 		    device_xname(sc->sc_dev), len));
   8362 
   8363 		/* If an error occurred, update stats and drop the packet. */
   8364 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8365 			m_freem(m);
   8366 			continue;
   8367 		}
   8368 
   8369 		/* No errors.  Receive the packet. */
   8370 		m_set_rcvif(m, ifp);
   8371 		m->m_pkthdr.len = len;
    8372 		/*
    8373 		 * TODO
    8374 		 * We should save the rsshash and rsstype in this mbuf.
    8375 		 */
   8376 		DPRINTF(WM_DEBUG_RX,
   8377 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8378 			device_xname(sc->sc_dev), rsstype, rsshash));
   8379 
   8380 		/*
   8381 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8382 		 * for us.  Associate the tag with the packet.
   8383 		 */
   8384 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8385 			continue;
   8386 
   8387 		/* Set up checksum info for this packet. */
   8388 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8389 		/*
    8390 		 * Update the receive pointer while holding rxq_lock,
    8391 		 * keeping it consistent with the counters.
    8392 		 */
   8393 		rxq->rxq_ptr = i;
   8394 		rxq->rxq_packets++;
   8395 		rxq->rxq_bytes += len;
   8396 		mutex_exit(rxq->rxq_lock);
   8397 
   8398 		/* Pass it on. */
   8399 		if_percpuq_enqueue(sc->sc_ipq, m);
   8400 
   8401 		mutex_enter(rxq->rxq_lock);
   8402 
   8403 		if (rxq->rxq_stopping)
   8404 			break;
   8405 	}
   8406 
   8407 	if (count != 0)
   8408 		rnd_add_uint32(&sc->rnd_source, count);
   8409 
   8410 	DPRINTF(WM_DEBUG_RX,
   8411 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8412 }
   8413 
   8414 /*
   8415  * wm_linkintr_gmii:
   8416  *
   8417  *	Helper; handle link interrupts for GMII.
   8418  */
   8419 static void
   8420 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8421 {
   8422 
   8423 	KASSERT(WM_CORE_LOCKED(sc));
   8424 
   8425 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8426 		__func__));
   8427 
   8428 	if (icr & ICR_LSC) {
   8429 		uint32_t reg;
   8430 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8431 
   8432 		if ((status & STATUS_LU) != 0) {
   8433 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8434 				device_xname(sc->sc_dev),
   8435 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8436 		} else {
   8437 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8438 				device_xname(sc->sc_dev)));
   8439 		}
   8440 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8441 			wm_gig_downshift_workaround_ich8lan(sc);
   8442 
   8443 		if ((sc->sc_type == WM_T_ICH8)
   8444 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8445 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8446 		}
   8447 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8448 			device_xname(sc->sc_dev)));
   8449 		mii_pollstat(&sc->sc_mii);
   8450 		if (sc->sc_type == WM_T_82543) {
   8451 			int miistatus, active;
   8452 
   8453 			/*
   8454 			 * With 82543, we need to force speed and
   8455 			 * duplex on the MAC equal to what the PHY
   8456 			 * speed and duplex configuration is.
   8457 			 */
   8458 			miistatus = sc->sc_mii.mii_media_status;
   8459 
   8460 			if (miistatus & IFM_ACTIVE) {
   8461 				active = sc->sc_mii.mii_media_active;
   8462 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8463 				switch (IFM_SUBTYPE(active)) {
   8464 				case IFM_10_T:
   8465 					sc->sc_ctrl |= CTRL_SPEED_10;
   8466 					break;
   8467 				case IFM_100_TX:
   8468 					sc->sc_ctrl |= CTRL_SPEED_100;
   8469 					break;
   8470 				case IFM_1000_T:
   8471 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8472 					break;
   8473 				default:
    8474 					/*
    8475 					 * Fiber?
    8476 					 * Should not enter here.
    8477 					 */
   8478 					printf("unknown media (%x)\n", active);
   8479 					break;
   8480 				}
   8481 				if (active & IFM_FDX)
   8482 					sc->sc_ctrl |= CTRL_FD;
   8483 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8484 			}
   8485 		} else if (sc->sc_type == WM_T_PCH) {
   8486 			wm_k1_gig_workaround_hv(sc,
   8487 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8488 		}
   8489 
   8490 		if ((sc->sc_phytype == WMPHY_82578)
   8491 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8492 			== IFM_1000_T)) {
   8493 
   8494 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8495 				delay(200*1000); /* XXX too big */
   8496 
   8497 				/* Link stall fix for link up */
   8498 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8499 				    HV_MUX_DATA_CTRL,
   8500 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8501 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8502 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8503 				    HV_MUX_DATA_CTRL,
   8504 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8505 			}
   8506 		}
   8507 		/*
   8508 		 * I217 Packet Loss issue:
   8509 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8510 		 * on power up.
   8511 		 * Set the Beacon Duration for I217 to 8 usec
   8512 		 */
   8513 		if ((sc->sc_type == WM_T_PCH_LPT)
   8514 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8515 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8516 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8517 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8518 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8519 		}
   8520 
   8521 		/* XXX Work-around I218 hang issue */
   8522 		/* e1000_k1_workaround_lpt_lp() */
   8523 
   8524 		if ((sc->sc_type == WM_T_PCH_LPT)
   8525 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8526 			/*
   8527 			 * Set platform power management values for Latency
   8528 			 * Tolerance Reporting (LTR)
   8529 			 */
   8530 			wm_platform_pm_pch_lpt(sc,
   8531 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8532 				    != 0));
   8533 		}
   8534 
   8535 		/* FEXTNVM6 K1-off workaround */
   8536 		if (sc->sc_type == WM_T_PCH_SPT) {
   8537 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8538 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8539 			    & FEXTNVM6_K1_OFF_ENABLE)
   8540 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8541 			else
   8542 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8543 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8544 		}
   8545 	} else if (icr & ICR_RXSEQ) {
    8546 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8547 			device_xname(sc->sc_dev)));
   8548 	}
   8549 }
   8550 
   8551 /*
   8552  * wm_linkintr_tbi:
   8553  *
   8554  *	Helper; handle link interrupts for TBI mode.
   8555  */
   8556 static void
   8557 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8558 {
   8559 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8560 	uint32_t status;
   8561 
   8562 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8563 		__func__));
   8564 
   8565 	status = CSR_READ(sc, WMREG_STATUS);
   8566 	if (icr & ICR_LSC) {
   8567 		if (status & STATUS_LU) {
   8568 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8569 			    device_xname(sc->sc_dev),
   8570 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8571 			/*
   8572 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8573 			 * so we should update sc->sc_ctrl
   8574 			 */
   8575 
   8576 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8577 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8578 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8579 			if (status & STATUS_FD)
   8580 				sc->sc_tctl |=
   8581 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8582 			else
   8583 				sc->sc_tctl |=
   8584 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8585 			if (sc->sc_ctrl & CTRL_TFCE)
   8586 				sc->sc_fcrtl |= FCRTL_XONE;
   8587 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8588 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8589 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8590 				      sc->sc_fcrtl);
   8591 			sc->sc_tbi_linkup = 1;
   8592 			if_link_state_change(ifp, LINK_STATE_UP);
   8593 		} else {
   8594 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8595 			    device_xname(sc->sc_dev)));
   8596 			sc->sc_tbi_linkup = 0;
   8597 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8598 		}
   8599 		/* Update LED */
   8600 		wm_tbi_serdes_set_linkled(sc);
   8601 	} else if (icr & ICR_RXSEQ) {
   8602 		DPRINTF(WM_DEBUG_LINK,
   8603 		    ("%s: LINK: Receive sequence error\n",
   8604 		    device_xname(sc->sc_dev)));
   8605 	}
   8606 }
   8607 
   8608 /*
   8609  * wm_linkintr_serdes:
   8610  *
    8611  *	Helper; handle link interrupts for SERDES mode.
   8612  */
   8613 static void
   8614 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8615 {
   8616 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8617 	struct mii_data *mii = &sc->sc_mii;
   8618 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8619 	uint32_t pcs_adv, pcs_lpab, reg;
   8620 
   8621 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8622 		__func__));
   8623 
   8624 	if (icr & ICR_LSC) {
   8625 		/* Check PCS */
   8626 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8627 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8628 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8629 				device_xname(sc->sc_dev)));
   8630 			mii->mii_media_status |= IFM_ACTIVE;
   8631 			sc->sc_tbi_linkup = 1;
   8632 			if_link_state_change(ifp, LINK_STATE_UP);
   8633 		} else {
   8634 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8635 				device_xname(sc->sc_dev)));
   8636 			mii->mii_media_status |= IFM_NONE;
   8637 			sc->sc_tbi_linkup = 0;
   8638 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8639 			wm_tbi_serdes_set_linkled(sc);
   8640 			return;
   8641 		}
   8642 		mii->mii_media_active |= IFM_1000_SX;
   8643 		if ((reg & PCS_LSTS_FDX) != 0)
   8644 			mii->mii_media_active |= IFM_FDX;
   8645 		else
   8646 			mii->mii_media_active |= IFM_HDX;
   8647 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8648 			/* Check flow */
   8649 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8650 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8651 				DPRINTF(WM_DEBUG_LINK,
   8652 				    ("XXX LINKOK but not ACOMP\n"));
   8653 				return;
   8654 			}
   8655 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8656 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8657 			DPRINTF(WM_DEBUG_LINK,
   8658 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8659 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8660 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8661 				mii->mii_media_active |= IFM_FLOW
   8662 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8663 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8664 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8665 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8666 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8667 				mii->mii_media_active |= IFM_FLOW
   8668 				    | IFM_ETH_TXPAUSE;
   8669 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8670 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8671 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8672 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8673 				mii->mii_media_active |= IFM_FLOW
   8674 				    | IFM_ETH_RXPAUSE;
   8675 		}
   8676 		/* Update LED */
   8677 		wm_tbi_serdes_set_linkled(sc);
   8678 	} else {
   8679 		DPRINTF(WM_DEBUG_LINK,
   8680 		    ("%s: LINK: Receive sequence error\n",
   8681 		    device_xname(sc->sc_dev)));
   8682 	}
   8683 }
   8684 
   8685 /*
   8686  * wm_linkintr:
   8687  *
   8688  *	Helper; handle link interrupts.
   8689  */
   8690 static void
   8691 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8692 {
   8693 
   8694 	KASSERT(WM_CORE_LOCKED(sc));
   8695 
   8696 	if (sc->sc_flags & WM_F_HAS_MII)
   8697 		wm_linkintr_gmii(sc, icr);
   8698 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8699 	    && (sc->sc_type >= WM_T_82575))
   8700 		wm_linkintr_serdes(sc, icr);
   8701 	else
   8702 		wm_linkintr_tbi(sc, icr);
   8703 }
   8704 
   8705 /*
   8706  * wm_intr_legacy:
   8707  *
   8708  *	Interrupt service routine for INTx and MSI.
   8709  */
   8710 static int
   8711 wm_intr_legacy(void *arg)
   8712 {
   8713 	struct wm_softc *sc = arg;
   8714 	struct wm_queue *wmq = &sc->sc_queue[0];
   8715 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8716 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8717 	uint32_t icr, rndval = 0;
   8718 	int handled = 0;
   8719 
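	/*
	 * Reading ICR acknowledges (clears) the pending interrupt causes,
	 * so keep looping until no cause bit we care about remains set.
	 */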
   8720 	while (1 /* CONSTCOND */) {
   8721 		icr = CSR_READ(sc, WMREG_ICR);
   8722 		if ((icr & sc->sc_icr) == 0)
   8723 			break;
   8724 		if (handled == 0) {
   8725 			DPRINTF(WM_DEBUG_TX,
    8726 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8727 		}
   8728 		if (rndval == 0)
   8729 			rndval = icr;
   8730 
   8731 		mutex_enter(rxq->rxq_lock);
   8732 
   8733 		if (rxq->rxq_stopping) {
   8734 			mutex_exit(rxq->rxq_lock);
   8735 			break;
   8736 		}
   8737 
   8738 		handled = 1;
   8739 
   8740 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8741 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8742 			DPRINTF(WM_DEBUG_RX,
   8743 			    ("%s: RX: got Rx intr 0x%08x\n",
   8744 			    device_xname(sc->sc_dev),
   8745 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8746 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8747 		}
   8748 #endif
   8749 		wm_rxeof(rxq, UINT_MAX);
   8750 
   8751 		mutex_exit(rxq->rxq_lock);
   8752 		mutex_enter(txq->txq_lock);
   8753 
   8754 		if (txq->txq_stopping) {
   8755 			mutex_exit(txq->txq_lock);
   8756 			break;
   8757 		}
   8758 
   8759 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8760 		if (icr & ICR_TXDW) {
   8761 			DPRINTF(WM_DEBUG_TX,
   8762 			    ("%s: TX: got TXDW interrupt\n",
   8763 			    device_xname(sc->sc_dev)));
   8764 			WM_Q_EVCNT_INCR(txq, txdw);
   8765 		}
   8766 #endif
   8767 		wm_txeof(sc, txq);
   8768 
   8769 		mutex_exit(txq->txq_lock);
   8770 		WM_CORE_LOCK(sc);
   8771 
   8772 		if (sc->sc_core_stopping) {
   8773 			WM_CORE_UNLOCK(sc);
   8774 			break;
   8775 		}
   8776 
   8777 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8778 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8779 			wm_linkintr(sc, icr);
   8780 		}
   8781 
   8782 		WM_CORE_UNLOCK(sc);
   8783 
   8784 		if (icr & ICR_RXO) {
   8785 #if defined(WM_DEBUG)
   8786 			log(LOG_WARNING, "%s: Receive overrun\n",
   8787 			    device_xname(sc->sc_dev));
   8788 #endif /* defined(WM_DEBUG) */
   8789 		}
   8790 	}
   8791 
   8792 	rnd_add_uint32(&sc->rnd_source, rndval);
   8793 
   8794 	if (handled) {
   8795 		/* Try to get more packets going. */
   8796 		softint_schedule(wmq->wmq_si);
   8797 	}
   8798 
   8799 	return handled;
   8800 }
   8801 
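/*
 * Mask (disable) the Tx/Rx interrupts of a single queue pair.  The 82574
 * uses per-queue cause bits in IMC/IMS, the 82575 uses EITR queue bits in
 * EIMC/EIMS, and later multiqueue devices use the MSI-X vector bit.
 */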
   8802 static inline void
   8803 wm_txrxintr_disable(struct wm_queue *wmq)
   8804 {
   8805 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8806 
   8807 	if (sc->sc_type == WM_T_82574)
   8808 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8809 	else if (sc->sc_type == WM_T_82575)
   8810 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8811 	else
   8812 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8813 }
   8814 
   8815 static inline void
   8816 wm_txrxintr_enable(struct wm_queue *wmq)
   8817 {
   8818 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8819 
   8820 	wm_itrs_calculate(sc, wmq);
   8821 
   8822 	if (sc->sc_type == WM_T_82574)
   8823 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8824 	else if (sc->sc_type == WM_T_82575)
   8825 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8826 	else
   8827 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8828 }
   8829 
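/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx interrupts of a queue pair
 *	when using MSI-X.  It masks the queue's interrupts, does bounded
 *	Tx and Rx completion, and defers further work to wm_handle_queue().
 */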
   8830 static int
   8831 wm_txrxintr_msix(void *arg)
   8832 {
   8833 	struct wm_queue *wmq = arg;
   8834 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8835 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8836 	struct wm_softc *sc = txq->txq_sc;
   8837 	u_int limit = sc->sc_rx_intr_process_limit;
   8838 
   8839 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8840 
   8841 	DPRINTF(WM_DEBUG_TX,
   8842 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8843 
   8844 	wm_txrxintr_disable(wmq);
   8845 
   8846 	mutex_enter(txq->txq_lock);
   8847 
   8848 	if (txq->txq_stopping) {
   8849 		mutex_exit(txq->txq_lock);
   8850 		return 0;
   8851 	}
   8852 
   8853 	WM_Q_EVCNT_INCR(txq, txdw);
   8854 	wm_txeof(sc, txq);
    8855 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   8856 	mutex_exit(txq->txq_lock);
   8857 
   8858 	DPRINTF(WM_DEBUG_RX,
   8859 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8860 	mutex_enter(rxq->rxq_lock);
   8861 
   8862 	if (rxq->rxq_stopping) {
   8863 		mutex_exit(rxq->rxq_lock);
   8864 		return 0;
   8865 	}
   8866 
   8867 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8868 	wm_rxeof(rxq, limit);
   8869 	mutex_exit(rxq->rxq_lock);
   8870 
   8871 	wm_itrs_writereg(sc, wmq);
   8872 
   8873 	softint_schedule(wmq->wmq_si);
   8874 
   8875 	return 1;
   8876 }
   8877 
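/*
 * wm_handle_queue:
 *
 *	Softint handler for a queue pair; completes Tx processing,
 *	restarts deferred transmission, processes more Rx packets and
 *	then re-enables the queue's interrupts.
 */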
   8878 static void
   8879 wm_handle_queue(void *arg)
   8880 {
   8881 	struct wm_queue *wmq = arg;
   8882 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8883 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8884 	struct wm_softc *sc = txq->txq_sc;
   8885 	u_int limit = sc->sc_rx_process_limit;
   8886 
   8887 	mutex_enter(txq->txq_lock);
   8888 	if (txq->txq_stopping) {
   8889 		mutex_exit(txq->txq_lock);
   8890 		return;
   8891 	}
   8892 	wm_txeof(sc, txq);
   8893 	wm_deferred_start_locked(txq);
   8894 	mutex_exit(txq->txq_lock);
   8895 
   8896 	mutex_enter(rxq->rxq_lock);
   8897 	if (rxq->rxq_stopping) {
   8898 		mutex_exit(rxq->rxq_lock);
   8899 		return;
   8900 	}
   8901 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8902 	wm_rxeof(rxq, limit);
   8903 	mutex_exit(rxq->rxq_lock);
   8904 
   8905 	wm_txrxintr_enable(wmq);
   8906 }
   8907 
   8908 /*
   8909  * wm_linkintr_msix:
   8910  *
   8911  *	Interrupt service routine for link status change for MSI-X.
   8912  */
   8913 static int
   8914 wm_linkintr_msix(void *arg)
   8915 {
   8916 	struct wm_softc *sc = arg;
   8917 	uint32_t reg;
   8918 
   8919 	DPRINTF(WM_DEBUG_LINK,
   8920 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8921 
   8922 	reg = CSR_READ(sc, WMREG_ICR);
   8923 	WM_CORE_LOCK(sc);
   8924 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8925 		goto out;
   8926 
   8927 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8928 	wm_linkintr(sc, ICR_LSC);
   8929 
   8930 out:
   8931 	WM_CORE_UNLOCK(sc);
   8932 
   8933 	if (sc->sc_type == WM_T_82574)
   8934 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8935 	else if (sc->sc_type == WM_T_82575)
   8936 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8937 	else
   8938 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8939 
   8940 	return 1;
   8941 }
   8942 
   8943 /*
   8944  * Media related.
   8945  * GMII, SGMII, TBI (and SERDES)
   8946  */
   8947 
   8948 /* Common */
   8949 
   8950 /*
   8951  * wm_tbi_serdes_set_linkled:
   8952  *
   8953  *	Update the link LED on TBI and SERDES devices.
   8954  */
   8955 static void
   8956 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8957 {
   8958 
   8959 	if (sc->sc_tbi_linkup)
   8960 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8961 	else
   8962 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8963 
   8964 	/* 82540 or newer devices are active low */
   8965 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8966 
   8967 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8968 }
   8969 
   8970 /* GMII related */
   8971 
   8972 /*
   8973  * wm_gmii_reset:
   8974  *
   8975  *	Reset the PHY.
   8976  */
   8977 static void
   8978 wm_gmii_reset(struct wm_softc *sc)
   8979 {
   8980 	uint32_t reg;
   8981 	int rv;
   8982 
   8983 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8984 		device_xname(sc->sc_dev), __func__));
   8985 
   8986 	rv = sc->phy.acquire(sc);
   8987 	if (rv != 0) {
   8988 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8989 		    __func__);
   8990 		return;
   8991 	}
   8992 
   8993 	switch (sc->sc_type) {
   8994 	case WM_T_82542_2_0:
   8995 	case WM_T_82542_2_1:
   8996 		/* null */
   8997 		break;
   8998 	case WM_T_82543:
   8999 		/*
   9000 		 * With 82543, we need to force speed and duplex on the MAC
   9001 		 * equal to what the PHY speed and duplex configuration is.
   9002 		 * In addition, we need to perform a hardware reset on the PHY
   9003 		 * to take it out of reset.
   9004 		 */
   9005 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9006 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9007 
   9008 		/* The PHY reset pin is active-low. */
   9009 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9010 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9011 		    CTRL_EXT_SWDPIN(4));
   9012 		reg |= CTRL_EXT_SWDPIO(4);
   9013 
   9014 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9015 		CSR_WRITE_FLUSH(sc);
   9016 		delay(10*1000);
   9017 
   9018 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9019 		CSR_WRITE_FLUSH(sc);
   9020 		delay(150);
   9021 #if 0
   9022 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9023 #endif
   9024 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9025 		break;
   9026 	case WM_T_82544:	/* reset 10000us */
   9027 	case WM_T_82540:
   9028 	case WM_T_82545:
   9029 	case WM_T_82545_3:
   9030 	case WM_T_82546:
   9031 	case WM_T_82546_3:
   9032 	case WM_T_82541:
   9033 	case WM_T_82541_2:
   9034 	case WM_T_82547:
   9035 	case WM_T_82547_2:
   9036 	case WM_T_82571:	/* reset 100us */
   9037 	case WM_T_82572:
   9038 	case WM_T_82573:
   9039 	case WM_T_82574:
   9040 	case WM_T_82575:
   9041 	case WM_T_82576:
   9042 	case WM_T_82580:
   9043 	case WM_T_I350:
   9044 	case WM_T_I354:
   9045 	case WM_T_I210:
   9046 	case WM_T_I211:
   9047 	case WM_T_82583:
   9048 	case WM_T_80003:
   9049 		/* generic reset */
   9050 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9051 		CSR_WRITE_FLUSH(sc);
   9052 		delay(20000);
   9053 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9054 		CSR_WRITE_FLUSH(sc);
   9055 		delay(20000);
   9056 
   9057 		if ((sc->sc_type == WM_T_82541)
   9058 		    || (sc->sc_type == WM_T_82541_2)
   9059 		    || (sc->sc_type == WM_T_82547)
   9060 		    || (sc->sc_type == WM_T_82547_2)) {
    9061 			/* Workarounds for IGP are done in igp_reset() */
   9062 			/* XXX add code to set LED after phy reset */
   9063 		}
   9064 		break;
   9065 	case WM_T_ICH8:
   9066 	case WM_T_ICH9:
   9067 	case WM_T_ICH10:
   9068 	case WM_T_PCH:
   9069 	case WM_T_PCH2:
   9070 	case WM_T_PCH_LPT:
   9071 	case WM_T_PCH_SPT:
   9072 		/* generic reset */
   9073 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9074 		CSR_WRITE_FLUSH(sc);
   9075 		delay(100);
   9076 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9077 		CSR_WRITE_FLUSH(sc);
   9078 		delay(150);
   9079 		break;
   9080 	default:
   9081 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9082 		    __func__);
   9083 		break;
   9084 	}
   9085 
   9086 	sc->phy.release(sc);
   9087 
   9088 	/* get_cfg_done */
   9089 	wm_get_cfg_done(sc);
   9090 
   9091 	/* extra setup */
   9092 	switch (sc->sc_type) {
   9093 	case WM_T_82542_2_0:
   9094 	case WM_T_82542_2_1:
   9095 	case WM_T_82543:
   9096 	case WM_T_82544:
   9097 	case WM_T_82540:
   9098 	case WM_T_82545:
   9099 	case WM_T_82545_3:
   9100 	case WM_T_82546:
   9101 	case WM_T_82546_3:
   9102 	case WM_T_82541_2:
   9103 	case WM_T_82547_2:
   9104 	case WM_T_82571:
   9105 	case WM_T_82572:
   9106 	case WM_T_82573:
   9107 	case WM_T_82574:
   9108 	case WM_T_82583:
   9109 	case WM_T_82575:
   9110 	case WM_T_82576:
   9111 	case WM_T_82580:
   9112 	case WM_T_I350:
   9113 	case WM_T_I354:
   9114 	case WM_T_I210:
   9115 	case WM_T_I211:
   9116 	case WM_T_80003:
   9117 		/* null */
   9118 		break;
   9119 	case WM_T_82541:
   9120 	case WM_T_82547:
    9121 		/* XXX Configure the LED after PHY reset */
   9122 		break;
   9123 	case WM_T_ICH8:
   9124 	case WM_T_ICH9:
   9125 	case WM_T_ICH10:
   9126 	case WM_T_PCH:
   9127 	case WM_T_PCH2:
   9128 	case WM_T_PCH_LPT:
   9129 	case WM_T_PCH_SPT:
   9130 		wm_phy_post_reset(sc);
   9131 		break;
   9132 	default:
   9133 		panic("%s: unknown type\n", __func__);
   9134 		break;
   9135 	}
   9136 }
   9137 
   9138 /*
    9139  * Set up sc_phytype and mii_{read|write}reg.
    9140  *
    9141  *  To identify the PHY type, the correct read/write functions must be
    9142  * selected, and to select them we need the PCI ID or MAC type without
    9143  * accessing PHY registers.
    9144  *
    9145  *  On the first call of this function, the PHY ID is not known yet, so
    9146  * check the PCI ID or MAC type.  The list of PCI IDs may not be
    9147  * perfect, so the result might be incorrect.
    9148  *
    9149  *  On the second call, the PHY OUI and model are used to identify the
    9150  * PHY type.  This may still be imperfect because of missing comparison
    9151  * entries, but it is better than the first call.
    9152  *
    9153  *  If the newly detected result differs from the previous assumption,
    9154  * a diagnostic message is printed.
   9155  */
   9156 static void
   9157 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9158     uint16_t phy_model)
   9159 {
   9160 	device_t dev = sc->sc_dev;
   9161 	struct mii_data *mii = &sc->sc_mii;
   9162 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9163 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9164 	mii_readreg_t new_readreg;
   9165 	mii_writereg_t new_writereg;
   9166 
   9167 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9168 		device_xname(sc->sc_dev), __func__));
   9169 
   9170 	if (mii->mii_readreg == NULL) {
   9171 		/*
   9172 		 *  This is the first call of this function. For ICH and PCH
   9173 		 * variants, it's difficult to determine the PHY access method
   9174 		 * by sc_type, so use the PCI product ID for some devices.
   9175 		 */
   9176 
   9177 		switch (sc->sc_pcidevid) {
   9178 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9179 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9180 			/* 82577 */
   9181 			new_phytype = WMPHY_82577;
   9182 			break;
   9183 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9184 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9185 			/* 82578 */
   9186 			new_phytype = WMPHY_82578;
   9187 			break;
   9188 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9189 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9190 			/* 82579 */
   9191 			new_phytype = WMPHY_82579;
   9192 			break;
   9193 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9194 		case PCI_PRODUCT_INTEL_82801I_BM:
   9195 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9196 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9197 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9198 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9199 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9200 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9201 			/* ICH8, 9, 10 with 82567 */
   9202 			new_phytype = WMPHY_BM;
   9203 			break;
   9204 		default:
   9205 			break;
   9206 		}
   9207 	} else {
   9208 		/* It's not the first call. Use PHY OUI and model */
   9209 		switch (phy_oui) {
   9210 		case MII_OUI_ATHEROS: /* XXX ??? */
   9211 			switch (phy_model) {
   9212 			case 0x0004: /* XXX */
   9213 				new_phytype = WMPHY_82578;
   9214 				break;
   9215 			default:
   9216 				break;
   9217 			}
   9218 			break;
   9219 		case MII_OUI_xxMARVELL:
   9220 			switch (phy_model) {
   9221 			case MII_MODEL_xxMARVELL_I210:
   9222 				new_phytype = WMPHY_I210;
   9223 				break;
   9224 			case MII_MODEL_xxMARVELL_E1011:
   9225 			case MII_MODEL_xxMARVELL_E1000_3:
   9226 			case MII_MODEL_xxMARVELL_E1000_5:
   9227 			case MII_MODEL_xxMARVELL_E1112:
   9228 				new_phytype = WMPHY_M88;
   9229 				break;
   9230 			case MII_MODEL_xxMARVELL_E1149:
   9231 				new_phytype = WMPHY_BM;
   9232 				break;
   9233 			case MII_MODEL_xxMARVELL_E1111:
   9234 			case MII_MODEL_xxMARVELL_I347:
   9235 			case MII_MODEL_xxMARVELL_E1512:
   9236 			case MII_MODEL_xxMARVELL_E1340M:
   9237 			case MII_MODEL_xxMARVELL_E1543:
   9238 				new_phytype = WMPHY_M88;
   9239 				break;
   9240 			case MII_MODEL_xxMARVELL_I82563:
   9241 				new_phytype = WMPHY_GG82563;
   9242 				break;
   9243 			default:
   9244 				break;
   9245 			}
   9246 			break;
   9247 		case MII_OUI_INTEL:
   9248 			switch (phy_model) {
   9249 			case MII_MODEL_INTEL_I82577:
   9250 				new_phytype = WMPHY_82577;
   9251 				break;
   9252 			case MII_MODEL_INTEL_I82579:
   9253 				new_phytype = WMPHY_82579;
   9254 				break;
   9255 			case MII_MODEL_INTEL_I217:
   9256 				new_phytype = WMPHY_I217;
   9257 				break;
   9258 			case MII_MODEL_INTEL_I82580:
   9259 			case MII_MODEL_INTEL_I350:
   9260 				new_phytype = WMPHY_82580;
   9261 				break;
   9262 			default:
   9263 				break;
   9264 			}
   9265 			break;
   9266 		case MII_OUI_yyINTEL:
   9267 			switch (phy_model) {
   9268 			case MII_MODEL_yyINTEL_I82562G:
   9269 			case MII_MODEL_yyINTEL_I82562EM:
   9270 			case MII_MODEL_yyINTEL_I82562ET:
   9271 				new_phytype = WMPHY_IFE;
   9272 				break;
   9273 			case MII_MODEL_yyINTEL_IGP01E1000:
   9274 				new_phytype = WMPHY_IGP;
   9275 				break;
   9276 			case MII_MODEL_yyINTEL_I82566:
   9277 				new_phytype = WMPHY_IGP_3;
   9278 				break;
   9279 			default:
   9280 				break;
   9281 			}
   9282 			break;
   9283 		default:
   9284 			break;
   9285 		}
   9286 		if (new_phytype == WMPHY_UNKNOWN)
   9287 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9288 			    __func__);
   9289 
   9290 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9291 		    && (sc->sc_phytype != new_phytype)) {
    9292 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9293 			    " was incorrect. PHY type from PHY ID = %u\n",
   9294 			    sc->sc_phytype, new_phytype);
   9295 		}
   9296 	}
   9297 
   9298 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9299 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9300 		/* SGMII */
   9301 		new_readreg = wm_sgmii_readreg;
   9302 		new_writereg = wm_sgmii_writereg;
   9303 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9304 		/* BM2 (phyaddr == 1) */
   9305 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9306 		    && (new_phytype != WMPHY_BM)
   9307 		    && (new_phytype != WMPHY_UNKNOWN))
   9308 			doubt_phytype = new_phytype;
   9309 		new_phytype = WMPHY_BM;
   9310 		new_readreg = wm_gmii_bm_readreg;
   9311 		new_writereg = wm_gmii_bm_writereg;
   9312 	} else if (sc->sc_type >= WM_T_PCH) {
   9313 		/* All PCH* use _hv_ */
   9314 		new_readreg = wm_gmii_hv_readreg;
   9315 		new_writereg = wm_gmii_hv_writereg;
   9316 	} else if (sc->sc_type >= WM_T_ICH8) {
   9317 		/* non-82567 ICH8, 9 and 10 */
   9318 		new_readreg = wm_gmii_i82544_readreg;
   9319 		new_writereg = wm_gmii_i82544_writereg;
   9320 	} else if (sc->sc_type >= WM_T_80003) {
   9321 		/* 80003 */
   9322 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9323 		    && (new_phytype != WMPHY_GG82563)
   9324 		    && (new_phytype != WMPHY_UNKNOWN))
   9325 			doubt_phytype = new_phytype;
   9326 		new_phytype = WMPHY_GG82563;
   9327 		new_readreg = wm_gmii_i80003_readreg;
   9328 		new_writereg = wm_gmii_i80003_writereg;
   9329 	} else if (sc->sc_type >= WM_T_I210) {
   9330 		/* I210 and I211 */
   9331 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9332 		    && (new_phytype != WMPHY_I210)
   9333 		    && (new_phytype != WMPHY_UNKNOWN))
   9334 			doubt_phytype = new_phytype;
   9335 		new_phytype = WMPHY_I210;
   9336 		new_readreg = wm_gmii_gs40g_readreg;
   9337 		new_writereg = wm_gmii_gs40g_writereg;
   9338 	} else if (sc->sc_type >= WM_T_82580) {
   9339 		/* 82580, I350 and I354 */
   9340 		new_readreg = wm_gmii_82580_readreg;
   9341 		new_writereg = wm_gmii_82580_writereg;
   9342 	} else if (sc->sc_type >= WM_T_82544) {
    9343 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9344 		new_readreg = wm_gmii_i82544_readreg;
   9345 		new_writereg = wm_gmii_i82544_writereg;
   9346 	} else {
   9347 		new_readreg = wm_gmii_i82543_readreg;
   9348 		new_writereg = wm_gmii_i82543_writereg;
   9349 	}
   9350 
   9351 	if (new_phytype == WMPHY_BM) {
   9352 		/* All BM use _bm_ */
   9353 		new_readreg = wm_gmii_bm_readreg;
   9354 		new_writereg = wm_gmii_bm_writereg;
   9355 	}
   9356 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9357 		/* All PCH* use _hv_ */
   9358 		new_readreg = wm_gmii_hv_readreg;
   9359 		new_writereg = wm_gmii_hv_writereg;
   9360 	}
   9361 
   9362 	/* Diag output */
   9363 	if (doubt_phytype != WMPHY_UNKNOWN)
   9364 		aprint_error_dev(dev, "Assumed new PHY type was "
   9365 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9366 		    new_phytype);
   9367 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9368 	    && (sc->sc_phytype != new_phytype))
    9369 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9370 		    " was incorrect. New PHY type = %u\n",
   9371 		    sc->sc_phytype, new_phytype);
   9372 
   9373 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9374 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9375 
   9376 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9377 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9378 		    "function was incorrect.\n");
   9379 
   9380 	/* Update now */
   9381 	sc->sc_phytype = new_phytype;
   9382 	mii->mii_readreg = new_readreg;
   9383 	mii->mii_writereg = new_writereg;
   9384 }
   9385 
   9386 /*
   9387  * wm_get_phy_id_82575:
   9388  *
   9389  * Return PHY ID. Return -1 if it failed.
   9390  */
   9391 static int
   9392 wm_get_phy_id_82575(struct wm_softc *sc)
   9393 {
   9394 	uint32_t reg;
   9395 	int phyid = -1;
   9396 
   9397 	/* XXX */
   9398 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9399 		return -1;
   9400 
   9401 	if (wm_sgmii_uses_mdio(sc)) {
   9402 		switch (sc->sc_type) {
   9403 		case WM_T_82575:
   9404 		case WM_T_82576:
   9405 			reg = CSR_READ(sc, WMREG_MDIC);
   9406 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9407 			break;
   9408 		case WM_T_82580:
   9409 		case WM_T_I350:
   9410 		case WM_T_I354:
   9411 		case WM_T_I210:
   9412 		case WM_T_I211:
   9413 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9414 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9415 			break;
   9416 		default:
   9417 			return -1;
   9418 		}
   9419 	}
   9420 
   9421 	return phyid;
   9422 }
   9423 
   9424 
   9425 /*
   9426  * wm_gmii_mediainit:
   9427  *
   9428  *	Initialize media for use on 1000BASE-T devices.
   9429  */
   9430 static void
   9431 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9432 {
   9433 	device_t dev = sc->sc_dev;
   9434 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9435 	struct mii_data *mii = &sc->sc_mii;
   9436 	uint32_t reg;
   9437 
   9438 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9439 		device_xname(sc->sc_dev), __func__));
   9440 
   9441 	/* We have GMII. */
   9442 	sc->sc_flags |= WM_F_HAS_MII;
   9443 
   9444 	if (sc->sc_type == WM_T_80003)
   9445 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9446 	else
   9447 		sc->sc_tipg = TIPG_1000T_DFLT;
   9448 
   9449 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9450 	if ((sc->sc_type == WM_T_82580)
   9451 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9452 	    || (sc->sc_type == WM_T_I211)) {
   9453 		reg = CSR_READ(sc, WMREG_PHPM);
   9454 		reg &= ~PHPM_GO_LINK_D;
   9455 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9456 	}
   9457 
   9458 	/*
   9459 	 * Let the chip set speed/duplex on its own based on
   9460 	 * signals from the PHY.
   9461 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9462 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9463 	 */
   9464 	sc->sc_ctrl |= CTRL_SLU;
   9465 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9466 
   9467 	/* Initialize our media structures and probe the GMII. */
   9468 	mii->mii_ifp = ifp;
   9469 
   9470 	mii->mii_statchg = wm_gmii_statchg;
   9471 
   9472 	/* get PHY control from SMBus to PCIe */
   9473 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9474 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9475 		wm_smbustopci(sc);
   9476 
   9477 	wm_gmii_reset(sc);
   9478 
   9479 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9480 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9481 	    wm_gmii_mediastatus);
   9482 
   9483 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9484 	    || (sc->sc_type == WM_T_82580)
   9485 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9486 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9487 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9488 			/* Attach only one port */
   9489 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9490 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9491 		} else {
   9492 			int i, id;
   9493 			uint32_t ctrl_ext;
   9494 
   9495 			id = wm_get_phy_id_82575(sc);
   9496 			if (id != -1) {
   9497 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9498 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9499 			}
   9500 			if ((id == -1)
   9501 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9502 				/* Power on the SGMII PHY if it is disabled */
   9503 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9504 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9505 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9506 				CSR_WRITE_FLUSH(sc);
   9507 				delay(300*1000); /* XXX too long */
   9508 
   9509 				/* from 1 to 8 */
   9510 				for (i = 1; i < 8; i++)
   9511 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9512 					    0xffffffff, i, MII_OFFSET_ANY,
   9513 					    MIIF_DOPAUSE);
   9514 
   9515 				/* restore previous sfp cage power state */
   9516 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9517 			}
   9518 		}
   9519 	} else {
   9520 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9521 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9522 	}
   9523 
   9524 	/*
   9525 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9526 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9527 	 */
   9528 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9529 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9530 		wm_set_mdio_slow_mode_hv(sc);
   9531 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9532 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9533 	}
   9534 
   9535 	/*
   9536 	 * (For ICH8 variants)
   9537 	 * If PHY detection failed, use BM's r/w function and retry.
   9538 	 */
   9539 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9540 		/* if failed, retry with *_bm_* */
   9541 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9542 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9543 		    sc->sc_phytype);
   9544 		sc->sc_phytype = WMPHY_BM;
   9545 		mii->mii_readreg = wm_gmii_bm_readreg;
   9546 		mii->mii_writereg = wm_gmii_bm_writereg;
   9547 
   9548 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9549 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9550 	}
   9551 
   9552 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9553 		/* No PHY was found */
   9554 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9555 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9556 		sc->sc_phytype = WMPHY_NONE;
   9557 	} else {
   9558 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9559 
   9560 		/*
    9561 		 * PHY found!  Check the PHY type again via the second call
    9562 		 * of wm_gmii_setup_phytype().
   9563 		 */
   9564 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9565 		    child->mii_mpd_model);
   9566 
   9567 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9568 	}
   9569 }
   9570 
   9571 /*
   9572  * wm_gmii_mediachange:	[ifmedia interface function]
   9573  *
   9574  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9575  */
   9576 static int
   9577 wm_gmii_mediachange(struct ifnet *ifp)
   9578 {
   9579 	struct wm_softc *sc = ifp->if_softc;
   9580 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9581 	int rc;
   9582 
   9583 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9584 		device_xname(sc->sc_dev), __func__));
   9585 	if ((ifp->if_flags & IFF_UP) == 0)
   9586 		return 0;
   9587 
   9588 	/* Disable D0 LPLU. */
   9589 	wm_lplu_d0_disable(sc);
   9590 
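	/*
	 * For autoselect, or on anything newer than the 82543, leave
	 * speed and duplex to the PHY/ASDE; otherwise they are forced
	 * in CTRL below from the selected media.
	 */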
   9591 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9592 	sc->sc_ctrl |= CTRL_SLU;
   9593 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9594 	    || (sc->sc_type > WM_T_82543)) {
   9595 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9596 	} else {
   9597 		sc->sc_ctrl &= ~CTRL_ASDE;
   9598 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9599 		if (ife->ifm_media & IFM_FDX)
   9600 			sc->sc_ctrl |= CTRL_FD;
   9601 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9602 		case IFM_10_T:
   9603 			sc->sc_ctrl |= CTRL_SPEED_10;
   9604 			break;
   9605 		case IFM_100_TX:
   9606 			sc->sc_ctrl |= CTRL_SPEED_100;
   9607 			break;
   9608 		case IFM_1000_T:
   9609 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9610 			break;
   9611 		default:
   9612 			panic("wm_gmii_mediachange: bad media 0x%x",
   9613 			    ife->ifm_media);
   9614 		}
   9615 	}
   9616 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9617 	CSR_WRITE_FLUSH(sc);
   9618 	if (sc->sc_type <= WM_T_82543)
   9619 		wm_gmii_reset(sc);
   9620 
   9621 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9622 		return 0;
   9623 	return rc;
   9624 }
   9625 
   9626 /*
   9627  * wm_gmii_mediastatus:	[ifmedia interface function]
   9628  *
   9629  *	Get the current interface media status on a 1000BASE-T device.
   9630  */
   9631 static void
   9632 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9633 {
   9634 	struct wm_softc *sc = ifp->if_softc;
   9635 
   9636 	ether_mediastatus(ifp, ifmr);
   9637 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9638 	    | sc->sc_flowflags;
   9639 }
   9640 
   9641 #define	MDI_IO		CTRL_SWDPIN(2)
   9642 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9643 #define	MDI_CLK		CTRL_SWDPIN(3)
   9644 
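/*
 * On the i82543 the MII management interface is bit-banged through
 * software-controlled pins: MDI_IO carries the data and MDI_CLK the
 * management clock.  Each bit is clocked out with a low-high-low
 * pulse of roughly 10us per phase, comfortably below the 2.5MHz MDC
 * ceiling of IEEE 802.3.
 */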
   9645 static void
   9646 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9647 {
   9648 	uint32_t i, v;
   9649 
   9650 	v = CSR_READ(sc, WMREG_CTRL);
   9651 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9652 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9653 
   9654 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9655 		if (data & i)
   9656 			v |= MDI_IO;
   9657 		else
   9658 			v &= ~MDI_IO;
   9659 		CSR_WRITE(sc, WMREG_CTRL, v);
   9660 		CSR_WRITE_FLUSH(sc);
   9661 		delay(10);
   9662 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9663 		CSR_WRITE_FLUSH(sc);
   9664 		delay(10);
   9665 		CSR_WRITE(sc, WMREG_CTRL, v);
   9666 		CSR_WRITE_FLUSH(sc);
   9667 		delay(10);
   9668 	}
   9669 }
   9670 
   9671 static uint32_t
   9672 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9673 {
   9674 	uint32_t v, i, data = 0;
   9675 
   9676 	v = CSR_READ(sc, WMREG_CTRL);
   9677 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9678 	v |= CTRL_SWDPIO(3);
   9679 
   9680 	CSR_WRITE(sc, WMREG_CTRL, v);
   9681 	CSR_WRITE_FLUSH(sc);
   9682 	delay(10);
   9683 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9684 	CSR_WRITE_FLUSH(sc);
   9685 	delay(10);
   9686 	CSR_WRITE(sc, WMREG_CTRL, v);
   9687 	CSR_WRITE_FLUSH(sc);
   9688 	delay(10);
   9689 
   9690 	for (i = 0; i < 16; i++) {
   9691 		data <<= 1;
   9692 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9693 		CSR_WRITE_FLUSH(sc);
   9694 		delay(10);
   9695 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9696 			data |= 1;
   9697 		CSR_WRITE(sc, WMREG_CTRL, v);
   9698 		CSR_WRITE_FLUSH(sc);
   9699 		delay(10);
   9700 	}
   9701 
   9702 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9703 	CSR_WRITE_FLUSH(sc);
   9704 	delay(10);
   9705 	CSR_WRITE(sc, WMREG_CTRL, v);
   9706 	CSR_WRITE_FLUSH(sc);
   9707 	delay(10);
   9708 
   9709 	return data;
   9710 }
   9711 
   9712 #undef MDI_IO
   9713 #undef MDI_DIR
   9714 #undef MDI_CLK
   9715 
   9716 /*
   9717  * wm_gmii_i82543_readreg:	[mii interface function]
   9718  *
   9719  *	Read a PHY register on the GMII (i82543 version).
   9720  */
   9721 static int
   9722 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9723 {
   9724 	struct wm_softc *sc = device_private(dev);
   9725 	int rv;
   9726 
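	/*
	 * This is a standard IEEE 802.3 clause 22 read frame: 32 bits of
	 * preamble, then start/opcode/PHY address/register address in 14
	 * bits, after which the PHY drives 16 bits of data back to us.
	 */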
   9727 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9728 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9729 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9730 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9731 
   9732 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9733 	    device_xname(dev), phy, reg, rv));
   9734 
   9735 	return rv;
   9736 }
   9737 
   9738 /*
   9739  * wm_gmii_i82543_writereg:	[mii interface function]
   9740  *
   9741  *	Write a PHY register on the GMII (i82543 version).
   9742  */
   9743 static void
   9744 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9745 {
   9746 	struct wm_softc *sc = device_private(dev);
   9747 
   9748 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9749 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9750 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9751 	    (MII_COMMAND_START << 30), 32);
   9752 }
   9753 
   9754 /*
   9755  * wm_gmii_mdic_readreg:	[mii interface function]
   9756  *
   9757  *	Read a PHY register on the GMII.
   9758  */
   9759 static int
   9760 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9761 {
   9762 	struct wm_softc *sc = device_private(dev);
   9763 	uint32_t mdic = 0;
   9764 	int i, rv;
   9765 
   9766 	if (reg > MII_ADDRMASK) {
   9767 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9768 		    __func__, sc->sc_phytype, reg);
   9769 		reg &= MII_ADDRMASK;
   9770 	}
   9771 
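	/*
	 * One MDIC write carries the whole transaction: the MAC
	 * serializes the opcode, PHY address and register address on the
	 * MDIO bus and sets MDIC_READY when done (or MDIC_E on error),
	 * which the loop below polls in 50us steps.
	 */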
   9772 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9773 	    MDIC_REGADD(reg));
   9774 
   9775 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9776 		mdic = CSR_READ(sc, WMREG_MDIC);
   9777 		if (mdic & MDIC_READY)
   9778 			break;
   9779 		delay(50);
   9780 	}
   9781 
   9782 	if ((mdic & MDIC_READY) == 0) {
   9783 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9784 		    device_xname(dev), phy, reg);
   9785 		rv = 0;
   9786 	} else if (mdic & MDIC_E) {
   9787 #if 0 /* This is normal if no PHY is present. */
   9788 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9789 		    device_xname(dev), phy, reg);
   9790 #endif
   9791 		rv = 0;
   9792 	} else {
   9793 		rv = MDIC_DATA(mdic);
   9794 		if (rv == 0xffff)
   9795 			rv = 0;
   9796 	}
   9797 
   9798 	return rv;
   9799 }
   9800 
   9801 /*
   9802  * wm_gmii_mdic_writereg:	[mii interface function]
   9803  *
   9804  *	Write a PHY register on the GMII.
   9805  */
   9806 static void
   9807 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9808 {
   9809 	struct wm_softc *sc = device_private(dev);
   9810 	uint32_t mdic = 0;
   9811 	int i;
   9812 
   9813 	if (reg > MII_ADDRMASK) {
   9814 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9815 		    __func__, sc->sc_phytype, reg);
   9816 		reg &= MII_ADDRMASK;
   9817 	}
   9818 
   9819 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9820 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9821 
   9822 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9823 		mdic = CSR_READ(sc, WMREG_MDIC);
   9824 		if (mdic & MDIC_READY)
   9825 			break;
   9826 		delay(50);
   9827 	}
   9828 
   9829 	if ((mdic & MDIC_READY) == 0)
   9830 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9831 		    device_xname(dev), phy, reg);
   9832 	else if (mdic & MDIC_E)
   9833 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9834 		    device_xname(dev), phy, reg);
   9835 }
   9836 
   9837 /*
   9838  * wm_gmii_i82544_readreg:	[mii interface function]
   9839  *
   9840  *	Read a PHY register on the GMII.
   9841  */
   9842 static int
   9843 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9844 {
   9845 	struct wm_softc *sc = device_private(dev);
   9846 	int rv;
   9847 
   9848 	if (sc->phy.acquire(sc)) {
   9849 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9850 		return 0;
   9851 	}
   9852 
   9853 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9854 		switch (sc->sc_phytype) {
   9855 		case WMPHY_IGP:
   9856 		case WMPHY_IGP_2:
   9857 		case WMPHY_IGP_3:
   9858 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9859 			break;
   9860 		default:
   9861 #ifdef WM_DEBUG
   9862 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9863 			    __func__, sc->sc_phytype, reg);
   9864 #endif
   9865 			break;
   9866 		}
   9867 	}
   9868 
   9869 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9870 	sc->phy.release(sc);
   9871 
   9872 	return rv;
   9873 }
   9874 
   9875 /*
   9876  * wm_gmii_i82544_writereg:	[mii interface function]
   9877  *
   9878  *	Write a PHY register on the GMII.
   9879  */
   9880 static void
   9881 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9882 {
   9883 	struct wm_softc *sc = device_private(dev);
   9884 
   9885 	if (sc->phy.acquire(sc)) {
   9886 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9887 		return;
   9888 	}
   9889 
   9890 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9891 		switch (sc->sc_phytype) {
   9892 		case WMPHY_IGP:
   9893 		case WMPHY_IGP_2:
   9894 		case WMPHY_IGP_3:
   9895 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9896 			break;
   9897 		default:
   9898 #ifdef WM_DEBUG
    9899 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9900 			    __func__, sc->sc_phytype, reg);
   9901 #endif
   9902 			break;
   9903 		}
   9904 	}
   9905 
   9906 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9907 	sc->phy.release(sc);
   9908 }
   9909 
   9910 /*
   9911  * wm_gmii_i80003_readreg:	[mii interface function]
   9912  *
   9913  *	Read a PHY register on the kumeran
   9914  * This could be handled by the PHY layer if we didn't have to lock the
   9915  * ressource ...
   9916  */
   9917 static int
   9918 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9919 {
   9920 	struct wm_softc *sc = device_private(dev);
   9921 	int rv;
   9922 
   9923 	if (phy != 1) /* only one PHY on kumeran bus */
   9924 		return 0;
   9925 
   9926 	if (sc->phy.acquire(sc)) {
   9927 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9928 		return 0;
   9929 	}
   9930 
   9931 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9932 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9933 		    reg >> GG82563_PAGE_SHIFT);
   9934 	} else {
   9935 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9936 		    reg >> GG82563_PAGE_SHIFT);
   9937 	}
    9938 	/* Wait an extra 200us to work around a bug with the MDIC ready bit */
   9939 	delay(200);
   9940 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9941 	delay(200);
   9942 	sc->phy.release(sc);
   9943 
   9944 	return rv;
   9945 }
   9946 
   9947 /*
   9948  * wm_gmii_i80003_writereg:	[mii interface function]
   9949  *
    9950  *	Write a PHY register on the Kumeran interface.
    9951  * This could be handled by the PHY layer if we didn't have to lock the
    9952  * resource ...
   9953  */
   9954 static void
   9955 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   9956 {
   9957 	struct wm_softc *sc = device_private(dev);
   9958 
   9959 	if (phy != 1) /* only one PHY on kumeran bus */
   9960 		return;
   9961 
   9962 	if (sc->phy.acquire(sc)) {
   9963 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9964 		return;
   9965 	}
   9966 
   9967 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9968 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9969 		    reg >> GG82563_PAGE_SHIFT);
   9970 	} else {
   9971 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9972 		    reg >> GG82563_PAGE_SHIFT);
   9973 	}
    9974 	/* Wait an extra 200us to work around a bug with the MDIC ready bit */
   9975 	delay(200);
   9976 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9977 	delay(200);
   9978 
   9979 	sc->phy.release(sc);
   9980 }
   9981 
   9982 /*
   9983  * wm_gmii_bm_readreg:	[mii interface function]
   9984  *
    9985  *	Read a PHY register on the BM PHY.
    9986  * This could be handled by the PHY layer if we didn't have to lock the
    9987  * resource ...
   9988  */
   9989 static int
   9990 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   9991 {
   9992 	struct wm_softc *sc = device_private(dev);
   9993 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9994 	uint16_t val;
   9995 	int rv;
   9996 
   9997 	if (sc->phy.acquire(sc)) {
   9998 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9999 		return 0;
   10000 	}
   10001 
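	/*
	 * On parts other than the 82574 and 82583, the registers in pages
	 * >= 768, page-0 register 25 and register 31 are only reachable
	 * through the PHY at address 1, so redirect such accesses there.
	 */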
   10002 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10003 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10004 		    || (reg == 31)) ? 1 : phy;
   10005 	/* Page 800 works differently than the rest so it has its own func */
   10006 	if (page == BM_WUC_PAGE) {
   10007 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10008 		rv = val;
   10009 		goto release;
   10010 	}
   10011 
   10012 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10013 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10014 		    && (sc->sc_type != WM_T_82583))
   10015 			wm_gmii_mdic_writereg(dev, phy,
   10016 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10017 		else
   10018 			wm_gmii_mdic_writereg(dev, phy,
   10019 			    BME1000_PHY_PAGE_SELECT, page);
   10020 	}
   10021 
   10022 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10023 
   10024 release:
   10025 	sc->phy.release(sc);
   10026 	return rv;
   10027 }
   10028 
   10029 /*
   10030  * wm_gmii_bm_writereg:	[mii interface function]
   10031  *
    10032  *	Write a PHY register on the BM PHY.
    10033  * This could be handled by the PHY layer if we didn't have to lock the
    10034  * resource ...
   10035  */
   10036 static void
   10037 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10038 {
   10039 	struct wm_softc *sc = device_private(dev);
   10040 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10041 
   10042 	if (sc->phy.acquire(sc)) {
   10043 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10044 		return;
   10045 	}
   10046 
   10047 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10048 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10049 		    || (reg == 31)) ? 1 : phy;
   10050 	/* Page 800 works differently than the rest so it has its own func */
   10051 	if (page == BM_WUC_PAGE) {
   10052 		uint16_t tmp;
   10053 
   10054 		tmp = val;
   10055 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10056 		goto release;
   10057 	}
   10058 
   10059 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10060 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10061 		    && (sc->sc_type != WM_T_82583))
   10062 			wm_gmii_mdic_writereg(dev, phy,
   10063 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10064 		else
   10065 			wm_gmii_mdic_writereg(dev, phy,
   10066 			    BME1000_PHY_PAGE_SELECT, page);
   10067 	}
   10068 
   10069 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10070 
   10071 release:
   10072 	sc->phy.release(sc);
   10073 }
   10074 
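/*
 * Accessing a BM wakeup register (page 800) is a three-step sequence,
 * mirroring the e1000 code: enable wakeup register access through the
 * WUC enable page (769), perform the access through the page-800
 * address/data opcode registers, then restore the saved WUCE value.
 */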
   10075 static void
   10076 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10077 {
   10078 	struct wm_softc *sc = device_private(dev);
   10079 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10080 	uint16_t wuce, reg;
   10081 
   10082 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10083 		device_xname(dev), __func__));
   10084 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10085 	if (sc->sc_type == WM_T_PCH) {
    10086 		/* XXX The e1000 driver does nothing here... why? */
   10087 	}
   10088 
   10089 	/*
   10090 	 * 1) Enable PHY wakeup register first.
   10091 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10092 	 */
   10093 
   10094 	/* Set page 769 */
   10095 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10096 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10097 
   10098 	/* Read WUCE and save it */
   10099 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10100 
   10101 	reg = wuce | BM_WUC_ENABLE_BIT;
   10102 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10103 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10104 
   10105 	/* Select page 800 */
   10106 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10107 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10108 
   10109 	/*
   10110 	 * 2) Access PHY wakeup register.
   10111 	 * See e1000_access_phy_wakeup_reg_bm.
   10112 	 */
   10113 
    10114 	/* Write the address of the page-800 register to access */
   10115 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10116 
   10117 	if (rd)
   10118 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10119 	else
   10120 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10121 
   10122 	/*
   10123 	 * 3) Disable PHY wakeup register.
   10124 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10125 	 */
   10126 	/* Set page 769 */
   10127 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10128 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10129 
   10130 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10131 }
   10132 
   10133 /*
   10134  * wm_gmii_hv_readreg:	[mii interface function]
   10135  *
    10136  *	Read a PHY register on the HV (PCH) PHY.
    10137  * This could be handled by the PHY layer if we didn't have to lock the
    10138  * resource ...
   10139  */
   10140 static int
   10141 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10142 {
   10143 	struct wm_softc *sc = device_private(dev);
   10144 	int rv;
   10145 
   10146 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10147 		device_xname(dev), __func__));
   10148 	if (sc->phy.acquire(sc)) {
   10149 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10150 		return 0;
   10151 	}
   10152 
   10153 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10154 	sc->phy.release(sc);
   10155 	return rv;
   10156 }
   10157 
   10158 static int
   10159 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10160 {
   10161 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10162 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10163 	uint16_t val;
   10164 	int rv;
   10165 
   10166 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10167 
   10168 	/* Page 800 works differently than the rest so it has its own func */
   10169 	if (page == BM_WUC_PAGE) {
   10170 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10171 		return val;
   10172 	}
   10173 
    10174 	/*
    10175 	 * Pages below 768 work differently from the rest and would need
    10176 	 * their own function, which is not implemented.
    10177 	 */
   10178 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10179 		printf("gmii_hv_readreg!!!\n");
   10180 		return 0;
   10181 	}
   10182 
   10183 	/*
   10184 	 * XXX I21[789] documents say that the SMBus Address register is at
   10185 	 * PHY address 01, Page 0 (not 768), Register 26.
   10186 	 */
   10187 	if (page == HV_INTC_FC_PAGE_START)
   10188 		page = 0;
   10189 
   10190 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10191 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10192 		    page << BME1000_PAGE_SHIFT);
   10193 	}
   10194 
   10195 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10196 	return rv;
   10197 }
   10198 
   10199 /*
   10200  * wm_gmii_hv_writereg:	[mii interface function]
   10201  *
    10202  *	Write a PHY register on the HV (PCH) PHY.
    10203  * This could be handled by the PHY layer if we didn't have to lock the
    10204  * resource ...
   10205  */
   10206 static void
   10207 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10208 {
   10209 	struct wm_softc *sc = device_private(dev);
   10210 
   10211 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10212 		device_xname(dev), __func__));
   10213 
   10214 	if (sc->phy.acquire(sc)) {
   10215 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10216 		return;
   10217 	}
   10218 
   10219 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10220 	sc->phy.release(sc);
   10221 }
   10222 
   10223 static void
   10224 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10225 {
   10226 	struct wm_softc *sc = device_private(dev);
   10227 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10228 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10229 
   10230 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10231 
   10232 	/* Page 800 works differently than the rest so it has its own func */
   10233 	if (page == BM_WUC_PAGE) {
   10234 		uint16_t tmp;
   10235 
   10236 		tmp = val;
   10237 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10238 		return;
   10239 	}
   10240 
    10241 	/*
    10242 	 * Pages below 768 work differently from the rest and would need
    10243 	 * their own function, which is not implemented.
    10244 	 */
   10245 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10246 		printf("gmii_hv_writereg!!!\n");
   10247 		return;
   10248 	}
   10249 
   10250 	{
   10251 		/*
   10252 		 * XXX I21[789] documents say that the SMBus Address register
   10253 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10254 		 */
   10255 		if (page == HV_INTC_FC_PAGE_START)
   10256 			page = 0;
   10257 
   10258 		/*
   10259 		 * XXX Workaround MDIO accesses being disabled after entering
   10260 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10261 		 * register is set)
   10262 		 */
   10263 		if (sc->sc_phytype == WMPHY_82578) {
   10264 			struct mii_softc *child;
   10265 
   10266 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10267 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10268 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10269 			    && ((val & (1 << 11)) != 0)) {
   10270 				printf("XXX need workaround\n");
   10271 			}
   10272 		}
   10273 
   10274 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10275 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10276 			    page << BME1000_PAGE_SHIFT);
   10277 		}
   10278 	}
   10279 
   10280 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10281 }
   10282 
   10283 /*
   10284  * wm_gmii_82580_readreg:	[mii interface function]
   10285  *
   10286  *	Read a PHY register on the 82580 and I350.
   10287  * This could be handled by the PHY layer if we didn't have to lock the
    10288  * resource ...
   10289  */
   10290 static int
   10291 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10292 {
   10293 	struct wm_softc *sc = device_private(dev);
   10294 	int rv;
   10295 
   10296 	if (sc->phy.acquire(sc) != 0) {
   10297 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10298 		return 0;
   10299 	}
   10300 
   10301 #ifdef DIAGNOSTIC
   10302 	if (reg > MII_ADDRMASK) {
   10303 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10304 		    __func__, sc->sc_phytype, reg);
   10305 		reg &= MII_ADDRMASK;
   10306 	}
   10307 #endif
   10308 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10309 
   10310 	sc->phy.release(sc);
   10311 	return rv;
   10312 }
   10313 
   10314 /*
   10315  * wm_gmii_82580_writereg:	[mii interface function]
   10316  *
   10317  *	Write a PHY register on the 82580 and I350.
   10318  * This could be handled by the PHY layer if we didn't have to lock the
    10319  * resource ...
   10320  */
   10321 static void
   10322 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10323 {
   10324 	struct wm_softc *sc = device_private(dev);
   10325 
   10326 	if (sc->phy.acquire(sc) != 0) {
   10327 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10328 		return;
   10329 	}
   10330 
   10331 #ifdef DIAGNOSTIC
   10332 	if (reg > MII_ADDRMASK) {
   10333 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10334 		    __func__, sc->sc_phytype, reg);
   10335 		reg &= MII_ADDRMASK;
   10336 	}
   10337 #endif
   10338 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10339 
   10340 	sc->phy.release(sc);
   10341 }
   10342 
   10343 /*
   10344  * wm_gmii_gs40g_readreg:	[mii interface function]
   10345  *
    10346  *	Read a PHY register on the I210 and I211.
    10347  * This could be handled by the PHY layer if we didn't have to lock the
    10348  * resource ...
   10349  */
   10350 static int
   10351 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10352 {
   10353 	struct wm_softc *sc = device_private(dev);
   10354 	int page, offset;
   10355 	int rv;
   10356 
   10357 	/* Acquire semaphore */
   10358 	if (sc->phy.acquire(sc)) {
   10359 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10360 		return 0;
   10361 	}
   10362 
   10363 	/* Page select */
   10364 	page = reg >> GS40G_PAGE_SHIFT;
   10365 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10366 
   10367 	/* Read reg */
   10368 	offset = reg & GS40G_OFFSET_MASK;
   10369 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10370 
   10371 	sc->phy.release(sc);
   10372 	return rv;
   10373 }
   10374 
   10375 /*
   10376  * wm_gmii_gs40g_writereg:	[mii interface function]
   10377  *
   10378  *	Write a PHY register on the I210 and I211.
   10379  * This could be handled by the PHY layer if we didn't have to lock the
    10380  * resource ...
   10381  */
   10382 static void
   10383 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10384 {
   10385 	struct wm_softc *sc = device_private(dev);
   10386 	int page, offset;
   10387 
   10388 	/* Acquire semaphore */
   10389 	if (sc->phy.acquire(sc)) {
   10390 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10391 		return;
   10392 	}
   10393 
   10394 	/* Page select */
   10395 	page = reg >> GS40G_PAGE_SHIFT;
   10396 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10397 
   10398 	/* Write reg */
   10399 	offset = reg & GS40G_OFFSET_MASK;
   10400 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10401 
   10402 	/* Release semaphore */
   10403 	sc->phy.release(sc);
   10404 }
   10405 
   10406 /*
   10407  * wm_gmii_statchg:	[mii interface function]
   10408  *
   10409  *	Callback from MII layer when media changes.
   10410  */
   10411 static void
   10412 wm_gmii_statchg(struct ifnet *ifp)
   10413 {
   10414 	struct wm_softc *sc = ifp->if_softc;
   10415 	struct mii_data *mii = &sc->sc_mii;
   10416 
   10417 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10418 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10419 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10420 
   10421 	/*
   10422 	 * Get flow control negotiation result.
   10423 	 */
   10424 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10425 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10426 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10427 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10428 	}
   10429 
   10430 	if (sc->sc_flowflags & IFM_FLOW) {
   10431 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10432 			sc->sc_ctrl |= CTRL_TFCE;
   10433 			sc->sc_fcrtl |= FCRTL_XONE;
   10434 		}
   10435 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10436 			sc->sc_ctrl |= CTRL_RFCE;
   10437 	}
   10438 
   10439 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10440 		DPRINTF(WM_DEBUG_LINK,
   10441 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10442 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10443 	} else {
   10444 		DPRINTF(WM_DEBUG_LINK,
   10445 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10446 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10447 	}
   10448 
   10449 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10450 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10451 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10452 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10453 	if (sc->sc_type == WM_T_80003) {
   10454 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10455 		case IFM_1000_T:
   10456 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10457 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10458 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10459 			break;
   10460 		default:
   10461 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10462 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10463 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10464 			break;
   10465 		}
   10466 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10467 	}
   10468 }
   10469 
   10470 /* kumeran related (80003, ICH* and PCH*) */
   10471 
   10472 /*
   10473  * wm_kmrn_readreg:
   10474  *
   10475  *	Read a kumeran register
   10476  */
   10477 static int
   10478 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10479 {
   10480 	int rv;
   10481 
   10482 	if (sc->sc_type == WM_T_80003)
   10483 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10484 	else
   10485 		rv = sc->phy.acquire(sc);
   10486 	if (rv != 0) {
   10487 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10488 		    __func__);
   10489 		return 0;
   10490 	}
   10491 
   10492 	rv = wm_kmrn_readreg_locked(sc, reg);
   10493 
   10494 	if (sc->sc_type == WM_T_80003)
   10495 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10496 	else
   10497 		sc->phy.release(sc);
   10498 
   10499 	return rv;
   10500 }
   10501 
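/*
 * A Kumeran read is a single shot through the KUMCTRLSTA register:
 * the register offset goes into the OFFSET field together with
 * KUMCTRLSTA_REN, and after a short settling delay the data is read
 * back from the low 16 bits of the same register.
 */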
   10502 static int
   10503 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10504 {
   10505 	int rv;
   10506 
   10507 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10508 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10509 	    KUMCTRLSTA_REN);
   10510 	CSR_WRITE_FLUSH(sc);
   10511 	delay(2);
   10512 
   10513 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10514 
   10515 	return rv;
   10516 }
   10517 
   10518 /*
   10519  * wm_kmrn_writereg:
   10520  *
   10521  *	Write a kumeran register
   10522  */
   10523 static void
   10524 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10525 {
   10526 	int rv;
   10527 
   10528 	if (sc->sc_type == WM_T_80003)
   10529 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10530 	else
   10531 		rv = sc->phy.acquire(sc);
   10532 	if (rv != 0) {
   10533 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10534 		    __func__);
   10535 		return;
   10536 	}
   10537 
   10538 	wm_kmrn_writereg_locked(sc, reg, val);
   10539 
   10540 	if (sc->sc_type == WM_T_80003)
   10541 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10542 	else
   10543 		sc->phy.release(sc);
   10544 }
   10545 
   10546 static void
   10547 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10548 {
   10549 
   10550 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10551 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10552 	    (val & KUMCTRLSTA_MASK));
   10553 }
   10554 
   10555 /* SGMII related */
   10556 
   10557 /*
   10558  * wm_sgmii_uses_mdio
   10559  *
   10560  * Check whether the transaction is to the internal PHY or the external
   10561  * MDIO interface. Return true if it's MDIO.
   10562  */
   10563 static bool
   10564 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10565 {
   10566 	uint32_t reg;
   10567 	bool ismdio = false;
   10568 
   10569 	switch (sc->sc_type) {
   10570 	case WM_T_82575:
   10571 	case WM_T_82576:
   10572 		reg = CSR_READ(sc, WMREG_MDIC);
   10573 		ismdio = ((reg & MDIC_DEST) != 0);
   10574 		break;
   10575 	case WM_T_82580:
   10576 	case WM_T_I350:
   10577 	case WM_T_I354:
   10578 	case WM_T_I210:
   10579 	case WM_T_I211:
   10580 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10581 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10582 		break;
   10583 	default:
   10584 		break;
   10585 	}
   10586 
   10587 	return ismdio;
   10588 }
   10589 
   10590 /*
   10591  * wm_sgmii_readreg:	[mii interface function]
   10592  *
    10593  *	Read a PHY register on the SGMII interface.
    10594  * This could be handled by the PHY layer if we didn't have to lock the
    10595  * resource ...
   10596  */
   10597 static int
   10598 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10599 {
   10600 	struct wm_softc *sc = device_private(dev);
   10601 	uint32_t i2ccmd;
   10602 	int i, rv;
   10603 
   10604 	if (sc->phy.acquire(sc)) {
   10605 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10606 		return 0;
   10607 	}
   10608 
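	/*
	 * The access is tunneled over the I2C interface via the I2CCMD
	 * register: a single write starts the transaction and the READY
	 * bit is polled below.  The 16-bit result arrives byte-swapped,
	 * hence the swap before returning.
	 */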
   10609 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10610 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10611 	    | I2CCMD_OPCODE_READ;
   10612 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10613 
   10614 	/* Poll the ready bit */
   10615 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10616 		delay(50);
   10617 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10618 		if (i2ccmd & I2CCMD_READY)
   10619 			break;
   10620 	}
   10621 	if ((i2ccmd & I2CCMD_READY) == 0)
   10622 		device_printf(dev, "I2CCMD Read did not complete\n");
   10623 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10624 		device_printf(dev, "I2CCMD Error bit set\n");
   10625 
   10626 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10627 
   10628 	sc->phy.release(sc);
   10629 	return rv;
   10630 }
   10631 
   10632 /*
   10633  * wm_sgmii_writereg:	[mii interface function]
   10634  *
   10635  *	Write a PHY register on the SGMII.
   10636  * This could be handled by the PHY layer if we didn't have to lock the
    10637  * resource ...
   10638  */
   10639 static void
   10640 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10641 {
   10642 	struct wm_softc *sc = device_private(dev);
   10643 	uint32_t i2ccmd;
   10644 	int i;
   10645 	int val_swapped;
   10646 
   10647 	if (sc->phy.acquire(sc) != 0) {
   10648 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10649 		return;
   10650 	}
   10651 	/* Swap the data bytes for the I2C interface */
   10652 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10653 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10654 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10655 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10656 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10657 
   10658 	/* Poll the ready bit */
   10659 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10660 		delay(50);
   10661 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10662 		if (i2ccmd & I2CCMD_READY)
   10663 			break;
   10664 	}
   10665 	if ((i2ccmd & I2CCMD_READY) == 0)
   10666 		device_printf(dev, "I2CCMD Write did not complete\n");
   10667 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10668 		device_printf(dev, "I2CCMD Error bit set\n");
   10669 
   10670 	sc->phy.release(sc);
   10671 }
   10672 
   10673 /* TBI related */
   10674 
   10675 /*
   10676  * wm_tbi_mediainit:
   10677  *
   10678  *	Initialize media for use on 1000BASE-X devices.
   10679  */
   10680 static void
   10681 wm_tbi_mediainit(struct wm_softc *sc)
   10682 {
   10683 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10684 	const char *sep = "";
   10685 
   10686 	if (sc->sc_type < WM_T_82543)
   10687 		sc->sc_tipg = TIPG_WM_DFLT;
   10688 	else
   10689 		sc->sc_tipg = TIPG_LG_DFLT;
   10690 
   10691 	sc->sc_tbi_serdes_anegticks = 5;
   10692 
   10693 	/* Initialize our media structures */
   10694 	sc->sc_mii.mii_ifp = ifp;
   10695 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10696 
   10697 	if ((sc->sc_type >= WM_T_82575)
   10698 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10699 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10700 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10701 	else
   10702 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10703 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10704 
   10705 	/*
   10706 	 * SWD Pins:
   10707 	 *
   10708 	 *	0 = Link LED (output)
   10709 	 *	1 = Loss Of Signal (input)
   10710 	 */
   10711 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10712 
   10713 	/* XXX Perhaps this is only for TBI */
   10714 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10715 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10716 
   10717 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10718 		sc->sc_ctrl &= ~CTRL_LRST;
   10719 
   10720 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10721 
   10722 #define	ADD(ss, mm, dd)							\
   10723 do {									\
   10724 	aprint_normal("%s%s", sep, ss);					\
   10725 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10726 	sep = ", ";							\
   10727 } while (/*CONSTCOND*/0)
   10728 
   10729 	aprint_normal_dev(sc->sc_dev, "");
   10730 
   10731 	if (sc->sc_type == WM_T_I354) {
   10732 		uint32_t status;
   10733 
   10734 		status = CSR_READ(sc, WMREG_STATUS);
   10735 		if (((status & STATUS_2P5_SKU) != 0)
   10736 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10737 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10738 		} else
   10739 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10740 	} else if (sc->sc_type == WM_T_82545) {
   10741 		/* Only 82545 is LX (XXX except SFP) */
   10742 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10743 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10744 	} else {
   10745 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10746 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10747 	}
   10748 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10749 	aprint_normal("\n");
   10750 
   10751 #undef ADD
   10752 
   10753 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10754 }
   10755 
   10756 /*
   10757  * wm_tbi_mediachange:	[ifmedia interface function]
   10758  *
   10759  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10760  */
   10761 static int
   10762 wm_tbi_mediachange(struct ifnet *ifp)
   10763 {
   10764 	struct wm_softc *sc = ifp->if_softc;
   10765 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10766 	uint32_t status;
   10767 	int i;
   10768 
   10769 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10770 		/* XXX need some work for >= 82571 and < 82575 */
   10771 		if (sc->sc_type < WM_T_82575)
   10772 			return 0;
   10773 	}
   10774 
   10775 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10776 	    || (sc->sc_type >= WM_T_82575))
   10777 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10778 
   10779 	sc->sc_ctrl &= ~CTRL_LRST;
   10780 	sc->sc_txcw = TXCW_ANE;
   10781 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10782 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10783 	else if (ife->ifm_media & IFM_FDX)
   10784 		sc->sc_txcw |= TXCW_FD;
   10785 	else
   10786 		sc->sc_txcw |= TXCW_HD;
   10787 
   10788 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10789 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10790 
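	/*
	 * TXCW is the clause 37 transmit configuration word: TXCW_ANE
	 * enables autonegotiation, and the FD/HD and pause bits set above
	 * are what gets advertised to the link partner in /C/ ordered
	 * sets.
	 */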
   10791 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10792 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10793 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10794 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10795 	CSR_WRITE_FLUSH(sc);
   10796 	delay(1000);
   10797 
   10798 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10799 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10800 
    10801 	/*
    10802 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    10803 	 * optics detect a signal; on older chips the sense is inverted.
    10804 	 */
   10805 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10806 		/* Have signal; wait for the link to come up. */
   10807 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10808 			delay(10000);
   10809 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10810 				break;
   10811 		}
   10812 
   10813 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10814 			    device_xname(sc->sc_dev),i));
   10815 
   10816 		status = CSR_READ(sc, WMREG_STATUS);
   10817 		DPRINTF(WM_DEBUG_LINK,
   10818 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10819 			device_xname(sc->sc_dev),status, STATUS_LU));
   10820 		if (status & STATUS_LU) {
   10821 			/* Link is up. */
   10822 			DPRINTF(WM_DEBUG_LINK,
   10823 			    ("%s: LINK: set media -> link up %s\n",
   10824 			    device_xname(sc->sc_dev),
   10825 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10826 
   10827 			/*
   10828 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10829 			 * so we should update sc->sc_ctrl
   10830 			 */
   10831 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10832 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10833 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10834 			if (status & STATUS_FD)
   10835 				sc->sc_tctl |=
   10836 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10837 			else
   10838 				sc->sc_tctl |=
   10839 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10840 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10841 				sc->sc_fcrtl |= FCRTL_XONE;
   10842 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10843 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10844 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10845 				      sc->sc_fcrtl);
   10846 			sc->sc_tbi_linkup = 1;
   10847 		} else {
   10848 			if (i == WM_LINKUP_TIMEOUT)
   10849 				wm_check_for_link(sc);
   10850 			/* Link is down. */
   10851 			DPRINTF(WM_DEBUG_LINK,
   10852 			    ("%s: LINK: set media -> link down\n",
   10853 			    device_xname(sc->sc_dev)));
   10854 			sc->sc_tbi_linkup = 0;
   10855 		}
   10856 	} else {
   10857 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10858 		    device_xname(sc->sc_dev)));
   10859 		sc->sc_tbi_linkup = 0;
   10860 	}
   10861 
   10862 	wm_tbi_serdes_set_linkled(sc);
   10863 
   10864 	return 0;
   10865 }
   10866 
   10867 /*
   10868  * wm_tbi_mediastatus:	[ifmedia interface function]
   10869  *
   10870  *	Get the current interface media status on a 1000BASE-X device.
   10871  */
   10872 static void
   10873 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10874 {
   10875 	struct wm_softc *sc = ifp->if_softc;
   10876 	uint32_t ctrl, status;
   10877 
   10878 	ifmr->ifm_status = IFM_AVALID;
   10879 	ifmr->ifm_active = IFM_ETHER;
   10880 
   10881 	status = CSR_READ(sc, WMREG_STATUS);
   10882 	if ((status & STATUS_LU) == 0) {
   10883 		ifmr->ifm_active |= IFM_NONE;
   10884 		return;
   10885 	}
   10886 
   10887 	ifmr->ifm_status |= IFM_ACTIVE;
   10888 	/* Only 82545 is LX */
   10889 	if (sc->sc_type == WM_T_82545)
   10890 		ifmr->ifm_active |= IFM_1000_LX;
   10891 	else
   10892 		ifmr->ifm_active |= IFM_1000_SX;
   10893 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10894 		ifmr->ifm_active |= IFM_FDX;
   10895 	else
   10896 		ifmr->ifm_active |= IFM_HDX;
   10897 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10898 	if (ctrl & CTRL_RFCE)
   10899 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10900 	if (ctrl & CTRL_TFCE)
   10901 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10902 }
   10903 
   10904 /* XXX TBI only */
   10905 static int
   10906 wm_check_for_link(struct wm_softc *sc)
   10907 {
   10908 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10909 	uint32_t rxcw;
   10910 	uint32_t ctrl;
   10911 	uint32_t status;
   10912 	uint32_t sig;
   10913 
   10914 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10915 		/* XXX need some work for >= 82571 */
   10916 		if (sc->sc_type >= WM_T_82571) {
   10917 			sc->sc_tbi_linkup = 1;
   10918 			return 0;
   10919 		}
   10920 	}
   10921 
   10922 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10923 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10924 	status = CSR_READ(sc, WMREG_STATUS);
   10925 
   10926 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10927 
   10928 	DPRINTF(WM_DEBUG_LINK,
   10929 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10930 		device_xname(sc->sc_dev), __func__,
   10931 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10932 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10933 
   10934 	/*
   10935 	 * SWDPIN   LU RXCW
   10936 	 *      0    0    0
   10937 	 *      0    0    1	(should not happen)
   10938 	 *      0    1    0	(should not happen)
   10939 	 *      0    1    1	(should not happen)
   10940 	 *      1    0    0	Disable autonego and force linkup
   10941 	 *      1    0    1	got /C/ but not linkup yet
   10942 	 *      1    1    0	(linkup)
   10943 	 *      1    1    1	If IFM_AUTO, back to autonego
   10944 	 *
   10945 	 */
   10946 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10947 	    && ((status & STATUS_LU) == 0)
   10948 	    && ((rxcw & RXCW_C) == 0)) {
   10949 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10950 			__func__));
   10951 		sc->sc_tbi_linkup = 0;
   10952 		/* Disable auto-negotiation in the TXCW register */
   10953 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10954 
   10955 		/*
   10956 		 * Force link-up and also force full-duplex.
   10957 		 *
    10958 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10959 		 * automatically, so we should update sc->sc_ctrl to match.
   10960 		 */
   10961 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10962 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10963 	} else if (((status & STATUS_LU) != 0)
   10964 	    && ((rxcw & RXCW_C) != 0)
   10965 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10966 		sc->sc_tbi_linkup = 1;
   10967 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10968 			__func__));
   10969 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10970 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10971 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10972 	    && ((rxcw & RXCW_C) != 0)) {
   10973 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10974 	} else {
   10975 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10976 			status));
   10977 	}
   10978 
   10979 	return 0;
   10980 }
   10981 
   10982 /*
   10983  * wm_tbi_tick:
   10984  *
   10985  *	Check the link on TBI devices.
   10986  *	This function acts as mii_tick().
   10987  */
   10988 static void
   10989 wm_tbi_tick(struct wm_softc *sc)
   10990 {
   10991 	struct mii_data *mii = &sc->sc_mii;
   10992 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10993 	uint32_t status;
   10994 
   10995 	KASSERT(WM_CORE_LOCKED(sc));
   10996 
   10997 	status = CSR_READ(sc, WMREG_STATUS);
   10998 
   10999 	/* XXX is this needed? */
   11000 	(void)CSR_READ(sc, WMREG_RXCW);
   11001 	(void)CSR_READ(sc, WMREG_CTRL);
   11002 
   11003 	/* set link status */
   11004 	if ((status & STATUS_LU) == 0) {
   11005 		DPRINTF(WM_DEBUG_LINK,
   11006 		    ("%s: LINK: checklink -> down\n",
   11007 			device_xname(sc->sc_dev)));
   11008 		sc->sc_tbi_linkup = 0;
   11009 	} else if (sc->sc_tbi_linkup == 0) {
   11010 		DPRINTF(WM_DEBUG_LINK,
   11011 		    ("%s: LINK: checklink -> up %s\n",
   11012 			device_xname(sc->sc_dev),
   11013 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11014 		sc->sc_tbi_linkup = 1;
   11015 		sc->sc_tbi_serdes_ticks = 0;
   11016 	}
   11017 
   11018 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11019 		goto setled;
   11020 
   11021 	if ((status & STATUS_LU) == 0) {
   11022 		sc->sc_tbi_linkup = 0;
   11023 		/* If the timer expired, retry autonegotiation */
   11024 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11025 		    && (++sc->sc_tbi_serdes_ticks
   11026 			>= sc->sc_tbi_serdes_anegticks)) {
   11027 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11028 			sc->sc_tbi_serdes_ticks = 0;
   11029 			/*
   11030 			 * Reset the link, and let autonegotiation do
   11031 			 * its thing
   11032 			 */
   11033 			sc->sc_ctrl |= CTRL_LRST;
   11034 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11035 			CSR_WRITE_FLUSH(sc);
   11036 			delay(1000);
   11037 			sc->sc_ctrl &= ~CTRL_LRST;
   11038 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11039 			CSR_WRITE_FLUSH(sc);
   11040 			delay(1000);
   11041 			CSR_WRITE(sc, WMREG_TXCW,
   11042 			    sc->sc_txcw & ~TXCW_ANE);
   11043 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11044 		}
   11045 	}
   11046 
   11047 setled:
   11048 	wm_tbi_serdes_set_linkled(sc);
   11049 }
   11050 
   11051 /* SERDES related */
   11052 static void
   11053 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11054 {
   11055 	uint32_t reg;
   11056 
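	/*
	 * Enable the PCS and clear SWDPIN(3) in CTRL_EXT; elsewhere in
	 * this driver that pin gates the SFP cage power, so clearing it
	 * here powers the link up.
	 */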
   11057 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11058 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11059 		return;
   11060 
   11061 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11062 	reg |= PCS_CFG_PCS_EN;
   11063 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11064 
   11065 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11066 	reg &= ~CTRL_EXT_SWDPIN(3);
   11067 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11068 	CSR_WRITE_FLUSH(sc);
   11069 }
   11070 
   11071 static int
   11072 wm_serdes_mediachange(struct ifnet *ifp)
   11073 {
   11074 	struct wm_softc *sc = ifp->if_softc;
   11075 	bool pcs_autoneg = true; /* XXX */
   11076 	uint32_t ctrl_ext, pcs_lctl, reg;
   11077 
   11078 	/* XXX Currently, this function is not called on 8257[12] */
   11079 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11080 	    || (sc->sc_type >= WM_T_82575))
   11081 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11082 
   11083 	wm_serdes_power_up_link_82575(sc);
   11084 
   11085 	sc->sc_ctrl |= CTRL_SLU;
   11086 
   11087 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11088 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11089 
   11090 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11091 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
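	/*
	 * The link mode strapped in CTRL_EXT decides the PCS setup:
	 * SGMII keeps autonegotiation enabled, 1000BASE-KX forces the
	 * link, and everything else forces 1000/full-duplex (with PCS
	 * autonegotiation optionally disabled on the 82575/82576).
	 */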
   11092 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11093 	case CTRL_EXT_LINK_MODE_SGMII:
   11094 		pcs_autoneg = true;
   11095 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11096 		break;
   11097 	case CTRL_EXT_LINK_MODE_1000KX:
   11098 		pcs_autoneg = false;
   11099 		/* FALLTHROUGH */
   11100 	default:
   11101 		if ((sc->sc_type == WM_T_82575)
   11102 		    || (sc->sc_type == WM_T_82576)) {
   11103 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11104 				pcs_autoneg = false;
   11105 		}
   11106 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11107 		    | CTRL_FRCFDX;
   11108 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11109 	}
   11110 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11111 
   11112 	if (pcs_autoneg) {
   11113 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11114 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11115 
   11116 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11117 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11118 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11119 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11120 	} else
   11121 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11122 
   11123 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11124 
   11126 	return 0;
   11127 }
   11128 
   11129 static void
   11130 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11131 {
   11132 	struct wm_softc *sc = ifp->if_softc;
   11133 	struct mii_data *mii = &sc->sc_mii;
   11134 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11135 	uint32_t pcs_adv, pcs_lpab, reg;
   11136 
   11137 	ifmr->ifm_status = IFM_AVALID;
   11138 	ifmr->ifm_active = IFM_ETHER;
   11139 
   11140 	/* Check PCS */
   11141 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11142 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11143 		ifmr->ifm_active |= IFM_NONE;
   11144 		sc->sc_tbi_linkup = 0;
   11145 		goto setled;
   11146 	}
   11147 
   11148 	sc->sc_tbi_linkup = 1;
   11149 	ifmr->ifm_status |= IFM_ACTIVE;
   11150 	if (sc->sc_type == WM_T_I354) {
   11151 		uint32_t status;
   11152 
   11153 		status = CSR_READ(sc, WMREG_STATUS);
   11154 		if (((status & STATUS_2P5_SKU) != 0)
   11155 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11156 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11157 		} else
   11158 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11159 	} else {
   11160 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11161 		case PCS_LSTS_SPEED_10:
   11162 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11163 			break;
   11164 		case PCS_LSTS_SPEED_100:
   11165 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11166 			break;
   11167 		case PCS_LSTS_SPEED_1000:
   11168 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11169 			break;
   11170 		default:
   11171 			device_printf(sc->sc_dev, "Unknown speed\n");
   11172 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11173 			break;
   11174 		}
   11175 	}
   11176 	if ((reg & PCS_LSTS_FDX) != 0)
   11177 		ifmr->ifm_active |= IFM_FDX;
   11178 	else
   11179 		ifmr->ifm_active |= IFM_HDX;
   11180 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11181 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11182 		/* Check flow */
   11183 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11184 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11185 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11186 			goto setled;
   11187 		}
   11188 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11189 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11190 		DPRINTF(WM_DEBUG_LINK,
   11191 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
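		/*
		 * Resolve flow control from our advertisement and the
		 * link partner ability word using the usual symmetric/
		 * asymmetric PAUSE rules: symmetric on both sides enables
		 * pause in both directions; otherwise the asymmetric
		 * combinations below enable TX-only or RX-only pause.
		 */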
   11192 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11193 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11194 			mii->mii_media_active |= IFM_FLOW
   11195 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11196 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11197 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11198 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11199 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11200 			mii->mii_media_active |= IFM_FLOW
   11201 			    | IFM_ETH_TXPAUSE;
   11202 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11203 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11204 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11205 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11206 			mii->mii_media_active |= IFM_FLOW
   11207 			    | IFM_ETH_RXPAUSE;
   11208 		}
   11209 	}
   11210 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11211 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11212 setled:
   11213 	wm_tbi_serdes_set_linkled(sc);
   11214 }
   11215 
   11216 /*
   11217  * wm_serdes_tick:
   11218  *
   11219  *	Check the link on serdes devices.
   11220  */
   11221 static void
   11222 wm_serdes_tick(struct wm_softc *sc)
   11223 {
   11224 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11225 	struct mii_data *mii = &sc->sc_mii;
   11226 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11227 	uint32_t reg;
   11228 
   11229 	KASSERT(WM_CORE_LOCKED(sc));
   11230 
   11231 	mii->mii_media_status = IFM_AVALID;
   11232 	mii->mii_media_active = IFM_ETHER;
   11233 
   11234 	/* Check PCS */
   11235 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11236 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11237 		mii->mii_media_status |= IFM_ACTIVE;
   11238 		sc->sc_tbi_linkup = 1;
   11239 		sc->sc_tbi_serdes_ticks = 0;
   11240 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11241 		if ((reg & PCS_LSTS_FDX) != 0)
   11242 			mii->mii_media_active |= IFM_FDX;
   11243 		else
   11244 			mii->mii_media_active |= IFM_HDX;
   11245 	} else {
    11246 		mii->mii_media_active |= IFM_NONE;
   11247 		sc->sc_tbi_linkup = 0;
   11248 		/* If the timer expired, retry autonegotiation */
   11249 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11250 		    && (++sc->sc_tbi_serdes_ticks
   11251 			>= sc->sc_tbi_serdes_anegticks)) {
   11252 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11253 			sc->sc_tbi_serdes_ticks = 0;
   11254 			/* XXX */
   11255 			wm_serdes_mediachange(ifp);
   11256 		}
   11257 	}
   11258 
   11259 	wm_tbi_serdes_set_linkled(sc);
   11260 }
   11261 
   11262 /* SFP related */
   11263 
   11264 static int
   11265 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11266 {
   11267 	uint32_t i2ccmd;
   11268 	int i;
   11269 
   11270 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11271 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11272 
   11273 	/* Poll the ready bit */
   11274 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11275 		delay(50);
   11276 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11277 		if (i2ccmd & I2CCMD_READY)
   11278 			break;
   11279 	}
   11280 	if ((i2ccmd & I2CCMD_READY) == 0)
   11281 		return -1;
   11282 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11283 		return -1;
   11284 
   11285 	*data = i2ccmd & 0x00ff;
   11286 
   11287 	return 0;
   11288 }
   11289 
   11290 static uint32_t
   11291 wm_sfp_get_media_type(struct wm_softc *sc)
   11292 {
   11293 	uint32_t ctrl_ext;
   11294 	uint8_t val = 0;
   11295 	int timeout = 3;
   11296 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11297 	int rv = -1;
   11298 
   11299 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11300 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11301 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11302 	CSR_WRITE_FLUSH(sc);
   11303 
   11304 	/* Read SFP module data */
   11305 	while (timeout) {
   11306 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11307 		if (rv == 0)
   11308 			break;
   11309 		delay(100*1000); /* XXX too big */
   11310 		timeout--;
   11311 	}
   11312 	if (rv != 0)
   11313 		goto out;
   11314 	switch (val) {
   11315 	case SFF_SFP_ID_SFF:
   11316 		aprint_normal_dev(sc->sc_dev,
   11317 		    "Module/Connector soldered to board\n");
   11318 		break;
   11319 	case SFF_SFP_ID_SFP:
   11320 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11321 		break;
   11322 	case SFF_SFP_ID_UNKNOWN:
   11323 		goto out;
   11324 	default:
   11325 		break;
   11326 	}
   11327 
   11328 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11329 	if (rv != 0) {
   11330 		goto out;
   11331 	}
   11332 
   11333 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11334 		mediatype = WM_MEDIATYPE_SERDES;
   11335 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   11336 		sc->sc_flags |= WM_F_SGMII;
   11337 		mediatype = WM_MEDIATYPE_COPPER;
   11338 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   11339 		sc->sc_flags |= WM_F_SGMII;
   11340 		mediatype = WM_MEDIATYPE_SERDES;
   11341 	}
   11342 
   11343 out:
   11344 	/* Restore I2C interface setting */
   11345 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11346 
   11347 	return mediatype;
   11348 }
   11349 
   11350 /*
   11351  * NVM related.
    11352  * Microwire, SPI (with or without EERD) and Flash.
   11353  */
   11354 
    11355 /* Both SPI and Microwire */
   11356 
   11357 /*
   11358  * wm_eeprom_sendbits:
   11359  *
   11360  *	Send a series of bits to the EEPROM.
   11361  */
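/*
 * A note on the waveform below (an editor's sketch, assuming the usual
 * three-wire EEPROM timing rather than anything stated in this file):
 * bits go out MSB first on EECD_DI, and the part samples DI on the
 * rising edge of EECD_SK, hence the write / clock-high / clock-low
 * sequence with ~2us settling delays per bit.
 */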
   11362 static void
   11363 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11364 {
   11365 	uint32_t reg;
   11366 	int x;
   11367 
   11368 	reg = CSR_READ(sc, WMREG_EECD);
   11369 
   11370 	for (x = nbits; x > 0; x--) {
   11371 		if (bits & (1U << (x - 1)))
   11372 			reg |= EECD_DI;
   11373 		else
   11374 			reg &= ~EECD_DI;
   11375 		CSR_WRITE(sc, WMREG_EECD, reg);
   11376 		CSR_WRITE_FLUSH(sc);
   11377 		delay(2);
   11378 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11379 		CSR_WRITE_FLUSH(sc);
   11380 		delay(2);
   11381 		CSR_WRITE(sc, WMREG_EECD, reg);
   11382 		CSR_WRITE_FLUSH(sc);
   11383 		delay(2);
   11384 	}
   11385 }
   11386 
   11387 /*
   11388  * wm_eeprom_recvbits:
   11389  *
   11390  *	Receive a series of bits from the EEPROM.
   11391  */
   11392 static void
   11393 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11394 {
   11395 	uint32_t reg, val;
   11396 	int x;
   11397 
   11398 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11399 
   11400 	val = 0;
   11401 	for (x = nbits; x > 0; x--) {
   11402 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11403 		CSR_WRITE_FLUSH(sc);
   11404 		delay(2);
   11405 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11406 			val |= (1U << (x - 1));
   11407 		CSR_WRITE(sc, WMREG_EECD, reg);
   11408 		CSR_WRITE_FLUSH(sc);
   11409 		delay(2);
   11410 	}
   11411 	*valp = val;
   11412 }
   11413 
   11414 /* Microwire */
   11415 
   11416 /*
   11417  * wm_nvm_read_uwire:
   11418  *
   11419  *	Read a word from the EEPROM using the MicroWire protocol.
   11420  */
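/*
 * A Microwire READ transaction, in outline (an editor's sketch): raise
 * chip select, shift out the 3-bit READ opcode (UWIRE_OPC_READ), shift
 * out the word address in sc_nvm_addrbits bits, then clock in 16 data
 * bits.  The conventional 93Cxx encoding for the opcode is 0b110, but
 * treat that value as an assumption here rather than something this
 * file states.
 */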
   11421 static int
   11422 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11423 {
   11424 	uint32_t reg, val;
   11425 	int i;
   11426 
   11427 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11428 		device_xname(sc->sc_dev), __func__));
   11429 
   11430 	for (i = 0; i < wordcnt; i++) {
   11431 		/* Clear SK and DI. */
   11432 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11433 		CSR_WRITE(sc, WMREG_EECD, reg);
   11434 
   11435 		/*
   11436 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11437 		 * and Xen.
   11438 		 *
   11439 		 * We use this workaround only for 82540 because qemu's
    11440 		 * e1000 acts as an 82540.
   11441 		 */
   11442 		if (sc->sc_type == WM_T_82540) {
   11443 			reg |= EECD_SK;
   11444 			CSR_WRITE(sc, WMREG_EECD, reg);
   11445 			reg &= ~EECD_SK;
   11446 			CSR_WRITE(sc, WMREG_EECD, reg);
   11447 			CSR_WRITE_FLUSH(sc);
   11448 			delay(2);
   11449 		}
   11450 		/* XXX: end of workaround */
   11451 
   11452 		/* Set CHIP SELECT. */
   11453 		reg |= EECD_CS;
   11454 		CSR_WRITE(sc, WMREG_EECD, reg);
   11455 		CSR_WRITE_FLUSH(sc);
   11456 		delay(2);
   11457 
   11458 		/* Shift in the READ command. */
   11459 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11460 
   11461 		/* Shift in address. */
   11462 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11463 
   11464 		/* Shift out the data. */
   11465 		wm_eeprom_recvbits(sc, &val, 16);
   11466 		data[i] = val & 0xffff;
   11467 
   11468 		/* Clear CHIP SELECT. */
   11469 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11470 		CSR_WRITE(sc, WMREG_EECD, reg);
   11471 		CSR_WRITE_FLUSH(sc);
   11472 		delay(2);
   11473 	}
   11474 
   11475 	return 0;
   11476 }
   11477 
   11478 /* SPI */
   11479 
   11480 /*
   11481  * Set SPI and FLASH related information from the EECD register.
   11482  * For 82541 and 82547, the word size is taken from EEPROM.
   11483  */
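/*
 * A worked example of the size math below, assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6 as in the e1000 sources this follows:
 * an EECD size field of 2 on an 82571 gives size = 2 + 6 = 8, so
 * sc_nvm_wordsize = 1 << 8 = 256 words.
 */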
   11484 static int
   11485 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11486 {
   11487 	int size;
   11488 	uint32_t reg;
   11489 	uint16_t data;
   11490 
   11491 	reg = CSR_READ(sc, WMREG_EECD);
   11492 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11493 
   11494 	/* Read the size of NVM from EECD by default */
   11495 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11496 	switch (sc->sc_type) {
   11497 	case WM_T_82541:
   11498 	case WM_T_82541_2:
   11499 	case WM_T_82547:
   11500 	case WM_T_82547_2:
   11501 		/* Set dummy value to access EEPROM */
   11502 		sc->sc_nvm_wordsize = 64;
   11503 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11504 		reg = data;
   11505 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11506 		if (size == 0)
   11507 			size = 6; /* 64 word size */
   11508 		else
   11509 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11510 		break;
   11511 	case WM_T_80003:
   11512 	case WM_T_82571:
   11513 	case WM_T_82572:
   11514 	case WM_T_82573: /* SPI case */
   11515 	case WM_T_82574: /* SPI case */
   11516 	case WM_T_82583: /* SPI case */
   11517 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11518 		if (size > 14)
   11519 			size = 14;
   11520 		break;
   11521 	case WM_T_82575:
   11522 	case WM_T_82576:
   11523 	case WM_T_82580:
   11524 	case WM_T_I350:
   11525 	case WM_T_I354:
   11526 	case WM_T_I210:
   11527 	case WM_T_I211:
   11528 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11529 		if (size > 15)
   11530 			size = 15;
   11531 		break;
   11532 	default:
   11533 		aprint_error_dev(sc->sc_dev,
   11534 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11535 		return -1;
   11536 		break;
   11537 	}
   11538 
   11539 	sc->sc_nvm_wordsize = 1 << size;
   11540 
   11541 	return 0;
   11542 }
   11543 
   11544 /*
   11545  * wm_nvm_ready_spi:
   11546  *
   11547  *	Wait for a SPI EEPROM to be ready for commands.
   11548  */
   11549 static int
   11550 wm_nvm_ready_spi(struct wm_softc *sc)
   11551 {
   11552 	uint32_t val;
   11553 	int usec;
   11554 
   11555 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11556 		device_xname(sc->sc_dev), __func__));
   11557 
   11558 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11559 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11560 		wm_eeprom_recvbits(sc, &val, 8);
   11561 		if ((val & SPI_SR_RDY) == 0)
   11562 			break;
   11563 	}
   11564 	if (usec >= SPI_MAX_RETRIES) {
    11565 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11566 		return 1;
   11567 	}
   11568 	return 0;
   11569 }
   11570 
   11571 /*
   11572  * wm_nvm_read_spi:
   11573  *
    11574  *	Read a word from the EEPROM using the SPI protocol.
   11575  */
   11576 static int
   11577 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11578 {
   11579 	uint32_t reg, val;
   11580 	int i;
   11581 	uint8_t opc;
   11582 
   11583 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11584 		device_xname(sc->sc_dev), __func__));
   11585 
   11586 	/* Clear SK and CS. */
   11587 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11588 	CSR_WRITE(sc, WMREG_EECD, reg);
   11589 	CSR_WRITE_FLUSH(sc);
   11590 	delay(2);
   11591 
   11592 	if (wm_nvm_ready_spi(sc))
   11593 		return 1;
   11594 
   11595 	/* Toggle CS to flush commands. */
   11596 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11597 	CSR_WRITE_FLUSH(sc);
   11598 	delay(2);
   11599 	CSR_WRITE(sc, WMREG_EECD, reg);
   11600 	CSR_WRITE_FLUSH(sc);
   11601 	delay(2);
   11602 
   11603 	opc = SPI_OPC_READ;
   11604 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11605 		opc |= SPI_OPC_A8;
   11606 
   11607 	wm_eeprom_sendbits(sc, opc, 8);
   11608 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11609 
   11610 	for (i = 0; i < wordcnt; i++) {
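		/*
		 * The EEPROM shifts each word out high byte first; swap
		 * the two bytes into the word order the rest of the
		 * driver expects (an editor's note on the line below).
		 */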
   11611 		wm_eeprom_recvbits(sc, &val, 16);
   11612 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11613 	}
   11614 
   11615 	/* Raise CS and clear SK. */
   11616 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11617 	CSR_WRITE(sc, WMREG_EECD, reg);
   11618 	CSR_WRITE_FLUSH(sc);
   11619 	delay(2);
   11620 
   11621 	return 0;
   11622 }
   11623 
    11624 /* Reading via the EERD register */
   11625 
   11626 static int
   11627 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11628 {
   11629 	uint32_t attempts = 100000;
   11630 	uint32_t i, reg = 0;
   11631 	int32_t done = -1;
   11632 
   11633 	for (i = 0; i < attempts; i++) {
   11634 		reg = CSR_READ(sc, rw);
   11635 
   11636 		if (reg & EERD_DONE) {
   11637 			done = 0;
   11638 			break;
   11639 		}
   11640 		delay(5);
   11641 	}
   11642 
   11643 	return done;
   11644 }
   11645 
   11646 static int
   11647 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11648     uint16_t *data)
   11649 {
   11650 	int i, eerd = 0;
   11651 	int error = 0;
   11652 
   11653 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11654 		device_xname(sc->sc_dev), __func__));
   11655 
   11656 	for (i = 0; i < wordcnt; i++) {
   11657 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11658 
   11659 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11660 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11661 		if (error != 0)
   11662 			break;
   11663 
   11664 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11665 	}
   11666 
   11667 	return error;
   11668 }
   11669 
   11670 /* Flash */
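/*
 * ICH8-family parts keep two copies ("banks") of the NVM in flash, each
 * tagged with a signature word.  wm_nvm_valid_bank_detect_ich8lan()
 * below picks the active bank: on PCH_SPT from CTRL_EXT, on ICH8/ICH9
 * from EECD when its valid bits are usable, and otherwise by probing
 * each bank's signature byte directly.
 */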
   11671 
   11672 static int
   11673 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11674 {
   11675 	uint32_t eecd;
   11676 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11677 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11678 	uint8_t sig_byte = 0;
   11679 
   11680 	switch (sc->sc_type) {
   11681 	case WM_T_PCH_SPT:
   11682 		/*
   11683 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11684 		 * sector valid bits from the NVM.
   11685 		 */
   11686 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11687 		if ((*bank == 0) || (*bank == 1)) {
   11688 			aprint_error_dev(sc->sc_dev,
   11689 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11690 				*bank);
   11691 			return -1;
   11692 		} else {
   11693 			*bank = *bank - 2;
   11694 			return 0;
   11695 		}
   11696 	case WM_T_ICH8:
   11697 	case WM_T_ICH9:
   11698 		eecd = CSR_READ(sc, WMREG_EECD);
   11699 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11700 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11701 			return 0;
   11702 		}
   11703 		/* FALLTHROUGH */
   11704 	default:
   11705 		/* Default to 0 */
   11706 		*bank = 0;
   11707 
   11708 		/* Check bank 0 */
   11709 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11710 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11711 			*bank = 0;
   11712 			return 0;
   11713 		}
   11714 
   11715 		/* Check bank 1 */
   11716 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11717 		    &sig_byte);
   11718 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11719 			*bank = 1;
   11720 			return 0;
   11721 		}
   11722 	}
   11723 
   11724 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11725 		device_xname(sc->sc_dev)));
   11726 	return -1;
   11727 }
   11728 
   11729 /******************************************************************************
   11730  * This function does initial flash setup so that a new read/write/erase cycle
   11731  * can be started.
   11732  *
   11733  * sc - The pointer to the hw structure
   11734  ****************************************************************************/
   11735 static int32_t
   11736 wm_ich8_cycle_init(struct wm_softc *sc)
   11737 {
   11738 	uint16_t hsfsts;
   11739 	int32_t error = 1;
   11740 	int32_t i     = 0;
   11741 
   11742 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11743 
    11744 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   11745 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11746 		return error;
   11747 	}
   11748 
    11749 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   11751 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11752 
   11753 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11754 
    11755 	/*
    11756 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11757 	 * check against in order to start a new cycle, or the FDONE bit
    11758 	 * should be changed in the hardware so that it is 1 after hardware
    11759 	 * reset, which can then be used to tell whether a cycle is in
    11760 	 * progress or has been completed.  We should also have some
    11761 	 * software semaphore mechanism to guard FDONE or the cycle-in-
    11762 	 * progress bit so that two threads' accesses can be serialized,
    11763 	 * or a way so that two threads don't start the cycle at once.
    11764 	 */
   11765 
   11766 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11767 		/*
   11768 		 * There is no cycle running at present, so we can start a
   11769 		 * cycle
   11770 		 */
   11771 
   11772 		/* Begin by setting Flash Cycle Done. */
   11773 		hsfsts |= HSFSTS_DONE;
   11774 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11775 		error = 0;
   11776 	} else {
   11777 		/*
   11778 		 * otherwise poll for sometime so the current cycle has a
   11779 		 * chance to end before giving up.
   11780 		 */
   11781 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11782 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11783 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11784 				error = 0;
   11785 				break;
   11786 			}
   11787 			delay(1);
   11788 		}
   11789 		if (error == 0) {
   11790 			/*
   11791 			 * Successful in waiting for previous cycle to timeout,
   11792 			 * now set the Flash Cycle Done.
   11793 			 */
   11794 			hsfsts |= HSFSTS_DONE;
   11795 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11796 		}
   11797 	}
   11798 	return error;
   11799 }
   11800 
   11801 /******************************************************************************
   11802  * This function starts a flash cycle and waits for its completion
   11803  *
   11804  * sc - The pointer to the hw structure
   11805  ****************************************************************************/
   11806 static int32_t
   11807 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11808 {
   11809 	uint16_t hsflctl;
   11810 	uint16_t hsfsts;
   11811 	int32_t error = 1;
   11812 	uint32_t i = 0;
   11813 
   11814 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11815 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11816 	hsflctl |= HSFCTL_GO;
   11817 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11818 
   11819 	/* Wait till FDONE bit is set to 1 */
   11820 	do {
   11821 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11822 		if (hsfsts & HSFSTS_DONE)
   11823 			break;
   11824 		delay(1);
   11825 		i++;
   11826 	} while (i < timeout);
   11827 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   11828 		error = 0;
   11829 
   11830 	return error;
   11831 }
   11832 
   11833 /******************************************************************************
   11834  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11835  *
   11836  * sc - The pointer to the hw structure
   11837  * index - The index of the byte or word to read.
   11838  * size - Size of data to read, 1=byte 2=word, 4=dword
   11839  * data - Pointer to the word to store the value read.
   11840  *****************************************************************************/
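/*
 * In outline (a sketch of the retry loop below): initialize the cycle
 * with wm_ich8_cycle_init(), program the byte count and ICH_CYCLE_READ
 * into HSFCTL, write the linear address to FADDR, kick the cycle with
 * wm_ich8_flash_cycle(), and on success pull the result out of FDATA0;
 * on FCERR, repeat up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
 */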
   11841 static int32_t
   11842 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11843     uint32_t size, uint32_t *data)
   11844 {
   11845 	uint16_t hsfsts;
   11846 	uint16_t hsflctl;
   11847 	uint32_t flash_linear_address;
   11848 	uint32_t flash_data = 0;
   11849 	int32_t error = 1;
   11850 	int32_t count = 0;
   11851 
    11852 	if (size < 1 || size > 4 || data == NULL ||
   11853 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11854 		return error;
   11855 
   11856 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11857 	    sc->sc_ich8_flash_base;
   11858 
   11859 	do {
   11860 		delay(1);
   11861 		/* Steps */
   11862 		error = wm_ich8_cycle_init(sc);
   11863 		if (error)
   11864 			break;
   11865 
   11866 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11867 		/* The BCOUNT field holds size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes. */
   11868 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11869 		    & HSFCTL_BCOUNT_MASK;
   11870 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11871 		if (sc->sc_type == WM_T_PCH_SPT) {
   11872 			/*
   11873 			 * In SPT, This register is in Lan memory space, not
   11874 			 * flash. Therefore, only 32 bit access is supported.
   11875 			 */
   11876 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11877 			    (uint32_t)hsflctl);
   11878 		} else
   11879 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11880 
   11881 		/*
   11882 		 * Write the last 24 bits of index into Flash Linear address
   11883 		 * field in Flash Address
   11884 		 */
    11885 		/* TODO: maybe check the index against the size of the flash */
   11886 
   11887 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11888 
   11889 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11890 
    11891 		/*
    11892 		 * If FCERR is set, clear it and retry the whole sequence
    11893 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT more times; otherwise
    11894 		 * read the result out of Flash Data0, least significant
    11895 		 * byte first.
    11896 		 */
   11897 		if (error == 0) {
   11898 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11899 			if (size == 1)
   11900 				*data = (uint8_t)(flash_data & 0x000000FF);
   11901 			else if (size == 2)
   11902 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11903 			else if (size == 4)
   11904 				*data = (uint32_t)flash_data;
   11905 			break;
   11906 		} else {
   11907 			/*
   11908 			 * If we've gotten here, then things are probably
   11909 			 * completely hosed, but if the error condition is
   11910 			 * detected, it won't hurt to give it another try...
   11911 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11912 			 */
   11913 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11914 			if (hsfsts & HSFSTS_ERR) {
   11915 				/* Repeat for some time before giving up. */
   11916 				continue;
   11917 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11918 				break;
   11919 		}
   11920 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11921 
   11922 	return error;
   11923 }
   11924 
   11925 /******************************************************************************
   11926  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11927  *
   11928  * sc - pointer to wm_hw structure
   11929  * index - The index of the byte to read.
   11930  * data - Pointer to a byte to store the value read.
   11931  *****************************************************************************/
   11932 static int32_t
   11933 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11934 {
   11935 	int32_t status;
   11936 	uint32_t word = 0;
   11937 
   11938 	status = wm_read_ich8_data(sc, index, 1, &word);
   11939 	if (status == 0)
   11940 		*data = (uint8_t)word;
   11941 	else
   11942 		*data = 0;
   11943 
   11944 	return status;
   11945 }
   11946 
   11947 /******************************************************************************
   11948  * Reads a word from the NVM using the ICH8 flash access registers.
   11949  *
   11950  * sc - pointer to wm_hw structure
   11951  * index - The starting byte index of the word to read.
   11952  * data - Pointer to a word to store the value read.
   11953  *****************************************************************************/
   11954 static int32_t
   11955 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11956 {
   11957 	int32_t status;
   11958 	uint32_t word = 0;
   11959 
   11960 	status = wm_read_ich8_data(sc, index, 2, &word);
   11961 	if (status == 0)
   11962 		*data = (uint16_t)word;
   11963 	else
   11964 		*data = 0;
   11965 
   11966 	return status;
   11967 }
   11968 
   11969 /******************************************************************************
   11970  * Reads a dword from the NVM using the ICH8 flash access registers.
   11971  *
   11972  * sc - pointer to wm_hw structure
   11973  * index - The starting byte index of the word to read.
   11974  * data - Pointer to a word to store the value read.
   11975  *****************************************************************************/
   11976 static int32_t
   11977 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11978 {
   11979 	int32_t status;
   11980 
   11981 	status = wm_read_ich8_data(sc, index, 4, data);
   11982 	return status;
   11983 }
   11984 
   11985 /******************************************************************************
   11986  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11987  * register.
   11988  *
   11989  * sc - Struct containing variables accessed by shared code
   11990  * offset - offset of word in the EEPROM to read
   11991  * data - word read from the EEPROM
   11992  * words - number of words to read
   11993  *****************************************************************************/
   11994 static int
   11995 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11996 {
   11997 	int32_t  error = 0;
   11998 	uint32_t flash_bank = 0;
   11999 	uint32_t act_offset = 0;
   12000 	uint32_t bank_offset = 0;
   12001 	uint16_t word = 0;
   12002 	uint16_t i = 0;
   12003 
   12004 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12005 		device_xname(sc->sc_dev), __func__));
   12006 
   12007 	/*
   12008 	 * We need to know which is the valid flash bank.  In the event
   12009 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12010 	 * managing flash_bank.  So it cannot be trusted and needs
   12011 	 * to be updated with each read.
   12012 	 */
   12013 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12014 	if (error) {
   12015 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12016 			device_xname(sc->sc_dev)));
   12017 		flash_bank = 0;
   12018 	}
   12019 
   12020 	/*
   12021 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12022 	 * size
   12023 	 */
   12024 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12025 
   12026 	for (i = 0; i < words; i++) {
   12027 		/* The NVM part needs a byte offset, hence * 2 */
   12028 		act_offset = bank_offset + ((offset + i) * 2);
   12029 		error = wm_read_ich8_word(sc, act_offset, &word);
   12030 		if (error) {
   12031 			aprint_error_dev(sc->sc_dev,
   12032 			    "%s: failed to read NVM\n", __func__);
   12033 			break;
   12034 		}
   12035 		data[i] = word;
   12036 	}
   12037 
   12038 	return error;
   12039 }
   12040 
   12041 /******************************************************************************
   12042  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12043  * register.
   12044  *
   12045  * sc - Struct containing variables accessed by shared code
   12046  * offset - offset of word in the EEPROM to read
   12047  * data - word read from the EEPROM
   12048  * words - number of words to read
   12049  *****************************************************************************/
   12050 static int
   12051 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12052 {
   12053 	int32_t  error = 0;
   12054 	uint32_t flash_bank = 0;
   12055 	uint32_t act_offset = 0;
   12056 	uint32_t bank_offset = 0;
   12057 	uint32_t dword = 0;
   12058 	uint16_t i = 0;
   12059 
   12060 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12061 		device_xname(sc->sc_dev), __func__));
   12062 
   12063 	/*
   12064 	 * We need to know which is the valid flash bank.  In the event
   12065 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12066 	 * managing flash_bank.  So it cannot be trusted and needs
   12067 	 * to be updated with each read.
   12068 	 */
   12069 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12070 	if (error) {
   12071 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12072 			device_xname(sc->sc_dev)));
   12073 		flash_bank = 0;
   12074 	}
   12075 
   12076 	/*
   12077 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12078 	 * size
   12079 	 */
   12080 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12081 
   12082 	for (i = 0; i < words; i++) {
   12083 		/* The NVM part needs a byte offset, hence * 2 */
   12084 		act_offset = bank_offset + ((offset + i) * 2);
   12085 		/* but we must read dword aligned, so mask ... */
   12086 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12087 		if (error) {
   12088 			aprint_error_dev(sc->sc_dev,
   12089 			    "%s: failed to read NVM\n", __func__);
   12090 			break;
   12091 		}
   12092 		/* ... and pick out low or high word */
   12093 		if ((act_offset & 0x2) == 0)
   12094 			data[i] = (uint16_t)(dword & 0xFFFF);
   12095 		else
   12096 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12097 	}
   12098 
   12099 	return error;
   12100 }
   12101 
   12102 /* iNVM */
   12103 
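/*
 * The iNVM (integrated NVM, I210/I211) is a flat array of dwords, each
 * tagged with a record type.  wm_nvm_read_word_invm() below walks the
 * records, skipping CSR-autoload and RSA-key structures by their fixed
 * sizes, until a word-autoload record matches the requested address.
 */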
   12104 static int
   12105 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12106 {
   12107 	int32_t  rv = 0;
   12108 	uint32_t invm_dword;
   12109 	uint16_t i;
   12110 	uint8_t record_type, word_address;
   12111 
   12112 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12113 		device_xname(sc->sc_dev), __func__));
   12114 
   12115 	for (i = 0; i < INVM_SIZE; i++) {
   12116 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12117 		/* Get record type */
   12118 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12119 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12120 			break;
   12121 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12122 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12123 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12124 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12125 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12126 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12127 			if (word_address == address) {
   12128 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12129 				rv = 0;
   12130 				break;
   12131 			}
   12132 		}
   12133 	}
   12134 
   12135 	return rv;
   12136 }
   12137 
   12138 static int
   12139 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12140 {
   12141 	int rv = 0;
   12142 	int i;
   12143 
   12144 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12145 		device_xname(sc->sc_dev), __func__));
   12146 
   12147 	for (i = 0; i < words; i++) {
   12148 		switch (offset + i) {
   12149 		case NVM_OFF_MACADDR:
   12150 		case NVM_OFF_MACADDR1:
   12151 		case NVM_OFF_MACADDR2:
   12152 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12153 			if (rv != 0) {
   12154 				data[i] = 0xffff;
   12155 				rv = -1;
   12156 			}
   12157 			break;
   12158 		case NVM_OFF_CFG2:
   12159 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12160 			if (rv != 0) {
   12161 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12162 				rv = 0;
   12163 			}
   12164 			break;
   12165 		case NVM_OFF_CFG4:
   12166 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12167 			if (rv != 0) {
   12168 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12169 				rv = 0;
   12170 			}
   12171 			break;
   12172 		case NVM_OFF_LED_1_CFG:
   12173 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12174 			if (rv != 0) {
   12175 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12176 				rv = 0;
   12177 			}
   12178 			break;
   12179 		case NVM_OFF_LED_0_2_CFG:
   12180 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12181 			if (rv != 0) {
   12182 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12183 				rv = 0;
   12184 			}
   12185 			break;
   12186 		case NVM_OFF_ID_LED_SETTINGS:
   12187 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12188 			if (rv != 0) {
   12189 				*data = ID_LED_RESERVED_FFFF;
   12190 				rv = 0;
   12191 			}
   12192 			break;
   12193 		default:
   12194 			DPRINTF(WM_DEBUG_NVM,
   12195 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12196 			*data = NVM_RESERVED_WORD;
   12197 			break;
   12198 		}
   12199 	}
   12200 
   12201 	return rv;
   12202 }
   12203 
    12204 /* Locking, NVM type detection, checksum validation, version and read */
   12205 
   12206 /*
   12207  * wm_nvm_acquire:
   12208  *
   12209  *	Perform the EEPROM handshake required on some chips.
   12210  */
   12211 static int
   12212 wm_nvm_acquire(struct wm_softc *sc)
   12213 {
   12214 	uint32_t reg;
   12215 	int x;
   12216 	int ret = 0;
   12217 
   12218 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12219 		device_xname(sc->sc_dev), __func__));
   12220 
   12221 	if (sc->sc_type >= WM_T_ICH8) {
   12222 		ret = wm_get_nvm_ich8lan(sc);
   12223 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   12224 		ret = wm_get_swfwhw_semaphore(sc);
   12225 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   12226 		/* This will also do wm_get_swsm_semaphore() if needed */
   12227 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   12228 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12229 		ret = wm_get_swsm_semaphore(sc);
   12230 	}
   12231 
   12232 	if (ret) {
   12233 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   12234 			__func__);
   12235 		return 1;
   12236 	}
   12237 
   12238 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12239 		reg = CSR_READ(sc, WMREG_EECD);
   12240 
   12241 		/* Request EEPROM access. */
   12242 		reg |= EECD_EE_REQ;
   12243 		CSR_WRITE(sc, WMREG_EECD, reg);
   12244 
   12245 		/* ..and wait for it to be granted. */
   12246 		for (x = 0; x < 1000; x++) {
   12247 			reg = CSR_READ(sc, WMREG_EECD);
   12248 			if (reg & EECD_EE_GNT)
   12249 				break;
   12250 			delay(5);
   12251 		}
   12252 		if ((reg & EECD_EE_GNT) == 0) {
   12253 			aprint_error_dev(sc->sc_dev,
   12254 			    "could not acquire EEPROM GNT\n");
   12255 			reg &= ~EECD_EE_REQ;
   12256 			CSR_WRITE(sc, WMREG_EECD, reg);
   12257 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12258 				wm_put_swfwhw_semaphore(sc);
   12259 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12260 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12261 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12262 				wm_put_swsm_semaphore(sc);
   12263 			return 1;
   12264 		}
   12265 	}
   12266 
   12267 	return 0;
   12268 }
   12269 
   12270 /*
   12271  * wm_nvm_release:
   12272  *
   12273  *	Release the EEPROM mutex.
   12274  */
   12275 static void
   12276 wm_nvm_release(struct wm_softc *sc)
   12277 {
   12278 	uint32_t reg;
   12279 
   12280 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12281 		device_xname(sc->sc_dev), __func__));
   12282 
   12283 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12284 		reg = CSR_READ(sc, WMREG_EECD);
   12285 		reg &= ~EECD_EE_REQ;
   12286 		CSR_WRITE(sc, WMREG_EECD, reg);
   12287 	}
   12288 
   12289 	if (sc->sc_type >= WM_T_ICH8) {
   12290 		wm_put_nvm_ich8lan(sc);
   12291 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12292 		wm_put_swfwhw_semaphore(sc);
   12293 	else if (sc->sc_flags & WM_F_LOCK_SWFW)
   12294 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12295 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12296 		wm_put_swsm_semaphore(sc);
   12297 }
   12298 
   12299 static int
   12300 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12301 {
   12302 	uint32_t eecd = 0;
   12303 
   12304 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12305 	    || sc->sc_type == WM_T_82583) {
   12306 		eecd = CSR_READ(sc, WMREG_EECD);
   12307 
   12308 		/* Isolate bits 15 & 16 */
   12309 		eecd = ((eecd >> 15) & 0x03);
   12310 
   12311 		/* If both bits are set, device is Flash type */
   12312 		if (eecd == 0x03)
   12313 			return 0;
   12314 	}
   12315 	return 1;
   12316 }
   12317 
   12318 static int
   12319 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12320 {
   12321 	uint32_t eec;
   12322 
   12323 	eec = CSR_READ(sc, WMREG_EEC);
   12324 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12325 		return 1;
   12326 
   12327 	return 0;
   12328 }
   12329 
   12330 /*
   12331  * wm_nvm_validate_checksum
   12332  *
   12333  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12334  */
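/*
 * Concretely: the sum of words 0x00-0x3F, including the checksum word
 * itself, must equal NVM_CHECKSUM (0xBABA by Intel convention; that
 * value is stated here as background, not taken from this file), so the
 * checksum word is programmed as NVM_CHECKSUM minus the sum of words
 * 0x00-0x3E.
 */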
   12335 static int
   12336 wm_nvm_validate_checksum(struct wm_softc *sc)
   12337 {
   12338 	uint16_t checksum;
   12339 	uint16_t eeprom_data;
   12340 #ifdef WM_DEBUG
   12341 	uint16_t csum_wordaddr, valid_checksum;
   12342 #endif
   12343 	int i;
   12344 
   12345 	checksum = 0;
   12346 
   12347 	/* Don't check for I211 */
   12348 	if (sc->sc_type == WM_T_I211)
   12349 		return 0;
   12350 
   12351 #ifdef WM_DEBUG
   12352 	if (sc->sc_type == WM_T_PCH_LPT) {
   12353 		csum_wordaddr = NVM_OFF_COMPAT;
   12354 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12355 	} else {
   12356 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12357 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12358 	}
   12359 
   12360 	/* Dump EEPROM image for debug */
   12361 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12362 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12363 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12364 		/* XXX PCH_SPT? */
   12365 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12366 		if ((eeprom_data & valid_checksum) == 0) {
   12367 			DPRINTF(WM_DEBUG_NVM,
   12368 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12369 				device_xname(sc->sc_dev), eeprom_data,
   12370 				    valid_checksum));
   12371 		}
   12372 	}
   12373 
   12374 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12375 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12376 		for (i = 0; i < NVM_SIZE; i++) {
   12377 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12378 				printf("XXXX ");
   12379 			else
   12380 				printf("%04hx ", eeprom_data);
   12381 			if (i % 8 == 7)
   12382 				printf("\n");
   12383 		}
   12384 	}
   12385 
   12386 #endif /* WM_DEBUG */
   12387 
   12388 	for (i = 0; i < NVM_SIZE; i++) {
   12389 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12390 			return 1;
   12391 		checksum += eeprom_data;
   12392 	}
   12393 
   12394 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12395 #ifdef WM_DEBUG
   12396 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12397 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12398 #endif
   12399 	}
   12400 
   12401 	return 0;
   12402 }
   12403 
   12404 static void
   12405 wm_nvm_version_invm(struct wm_softc *sc)
   12406 {
   12407 	uint32_t dword;
   12408 
   12409 	/*
    12410 	 * Linux's code to decode the version is very strange, so we don't
    12411 	 * follow that algorithm and just use word 61 as the documentation
    12412 	 * describes.  Perhaps it's not perfect, though...
   12413 	 *
   12414 	 * Example:
   12415 	 *
   12416 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12417 	 */
   12418 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12419 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12420 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12421 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12422 }
   12423 
   12424 static void
   12425 wm_nvm_version(struct wm_softc *sc)
   12426 {
   12427 	uint16_t major, minor, build, patch;
   12428 	uint16_t uid0, uid1;
   12429 	uint16_t nvm_data;
   12430 	uint16_t off;
   12431 	bool check_version = false;
   12432 	bool check_optionrom = false;
   12433 	bool have_build = false;
   12434 	bool have_uid = true;
   12435 
   12436 	/*
   12437 	 * Version format:
   12438 	 *
   12439 	 * XYYZ
   12440 	 * X0YZ
   12441 	 * X0YY
   12442 	 *
   12443 	 * Example:
   12444 	 *
   12445 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12446 	 *	82571	0x50a6	5.10.6?
   12447 	 *	82572	0x506a	5.6.10?
   12448 	 *	82572EI	0x5069	5.6.9?
   12449 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12450 	 *		0x2013	2.1.3?
    12451 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
   12452 	 */
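	/*
	 * The minor field is packed BCD-style, hence the
	 * "(minor / 16) * 10 + (minor % 16)" conversion further down:
	 * e.g. 0x10 decodes to decimal 10 (an editor's reading of the
	 * code, not of the spec).
	 */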
   12453 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12454 	switch (sc->sc_type) {
   12455 	case WM_T_82571:
   12456 	case WM_T_82572:
   12457 	case WM_T_82574:
   12458 	case WM_T_82583:
   12459 		check_version = true;
   12460 		check_optionrom = true;
   12461 		have_build = true;
   12462 		break;
   12463 	case WM_T_82575:
   12464 	case WM_T_82576:
   12465 	case WM_T_82580:
   12466 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12467 			check_version = true;
   12468 		break;
   12469 	case WM_T_I211:
   12470 		wm_nvm_version_invm(sc);
   12471 		have_uid = false;
   12472 		goto printver;
   12473 	case WM_T_I210:
   12474 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12475 			wm_nvm_version_invm(sc);
   12476 			have_uid = false;
   12477 			goto printver;
   12478 		}
   12479 		/* FALLTHROUGH */
   12480 	case WM_T_I350:
   12481 	case WM_T_I354:
   12482 		check_version = true;
   12483 		check_optionrom = true;
   12484 		break;
   12485 	default:
   12486 		return;
   12487 	}
   12488 	if (check_version) {
   12489 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12490 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12491 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12492 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12493 			build = nvm_data & NVM_BUILD_MASK;
   12494 			have_build = true;
   12495 		} else
   12496 			minor = nvm_data & 0x00ff;
   12497 
   12498 		/* Decimal */
   12499 		minor = (minor / 16) * 10 + (minor % 16);
   12500 		sc->sc_nvm_ver_major = major;
   12501 		sc->sc_nvm_ver_minor = minor;
   12502 
   12503 printver:
   12504 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12505 		    sc->sc_nvm_ver_minor);
   12506 		if (have_build) {
   12507 			sc->sc_nvm_ver_build = build;
   12508 			aprint_verbose(".%d", build);
   12509 		}
   12510 	}
   12511 	if (check_optionrom) {
   12512 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12513 		/* Option ROM Version */
   12514 		if ((off != 0x0000) && (off != 0xffff)) {
   12515 			off += NVM_COMBO_VER_OFF;
   12516 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12517 			wm_nvm_read(sc, off, 1, &uid0);
   12518 			if ((uid0 != 0) && (uid0 != 0xffff)
   12519 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12520 				/* 16bits */
   12521 				major = uid0 >> 8;
   12522 				build = (uid0 << 8) | (uid1 >> 8);
   12523 				patch = uid1 & 0x00ff;
   12524 				aprint_verbose(", option ROM Version %d.%d.%d",
   12525 				    major, build, patch);
   12526 			}
   12527 		}
   12528 	}
   12529 
   12530 	if (have_uid) {
   12531 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12532 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12533 	}
   12534 }
   12535 
   12536 /*
   12537  * wm_nvm_read:
   12538  *
   12539  *	Read data from the serial EEPROM.
   12540  */
   12541 static int
   12542 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12543 {
   12544 	int rv;
   12545 
   12546 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12547 		device_xname(sc->sc_dev), __func__));
   12548 
   12549 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12550 		return 1;
   12551 
   12552 	if (wm_nvm_acquire(sc))
   12553 		return 1;
   12554 
   12555 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12556 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12557 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12558 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12559 	else if (sc->sc_type == WM_T_PCH_SPT)
   12560 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12561 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12562 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12563 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12564 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12565 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12566 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12567 	else
   12568 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12569 
   12570 	wm_nvm_release(sc);
   12571 	return rv;
   12572 }
   12573 
   12574 /*
   12575  * Hardware semaphores.
    12576  * Very complex...
   12577  */
   12578 
   12579 static int
   12580 wm_get_null(struct wm_softc *sc)
   12581 {
   12582 
   12583 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12584 		device_xname(sc->sc_dev), __func__));
   12585 	return 0;
   12586 }
   12587 
   12588 static void
   12589 wm_put_null(struct wm_softc *sc)
   12590 {
   12591 
   12592 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12593 		device_xname(sc->sc_dev), __func__));
   12594 	return;
   12595 }
   12596 
   12597 /*
   12598  * Get hardware semaphore.
   12599  * Same as e1000_get_hw_semaphore_generic()
   12600  */
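/*
 * Two-stage handshake, in outline: first wait for SWSM_SMBI to read as
 * 0 (free), then claim SWSM_SWESMBI by writing it and reading it back -
 * if the bit sticks, software owns the semaphore.  That reading SWSM
 * itself sets SMBI is an assumption from the e1000 documentation, not
 * something this file states.
 */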
   12601 static int
   12602 wm_get_swsm_semaphore(struct wm_softc *sc)
   12603 {
   12604 	int32_t timeout;
   12605 	uint32_t swsm;
   12606 
   12607 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12608 		device_xname(sc->sc_dev), __func__));
   12609 	KASSERT(sc->sc_nvm_wordsize > 0);
   12610 
   12611 	/* Get the SW semaphore. */
   12612 	timeout = sc->sc_nvm_wordsize + 1;
   12613 	while (timeout) {
   12614 		swsm = CSR_READ(sc, WMREG_SWSM);
   12615 
   12616 		if ((swsm & SWSM_SMBI) == 0)
   12617 			break;
   12618 
   12619 		delay(50);
   12620 		timeout--;
   12621 	}
   12622 
   12623 	if (timeout == 0) {
   12624 		aprint_error_dev(sc->sc_dev,
   12625 		    "could not acquire SWSM SMBI\n");
   12626 		return 1;
   12627 	}
   12628 
   12629 	/* Get the FW semaphore. */
   12630 	timeout = sc->sc_nvm_wordsize + 1;
   12631 	while (timeout) {
   12632 		swsm = CSR_READ(sc, WMREG_SWSM);
   12633 		swsm |= SWSM_SWESMBI;
   12634 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12635 		/* If we managed to set the bit we got the semaphore. */
   12636 		swsm = CSR_READ(sc, WMREG_SWSM);
   12637 		if (swsm & SWSM_SWESMBI)
   12638 			break;
   12639 
   12640 		delay(50);
   12641 		timeout--;
   12642 	}
   12643 
   12644 	if (timeout == 0) {
   12645 		aprint_error_dev(sc->sc_dev,
   12646 		    "could not acquire SWSM SWESMBI\n");
   12647 		/* Release semaphores */
   12648 		wm_put_swsm_semaphore(sc);
   12649 		return 1;
   12650 	}
   12651 	return 0;
   12652 }
   12653 
   12654 /*
   12655  * Put hardware semaphore.
   12656  * Same as e1000_put_hw_semaphore_generic()
   12657  */
   12658 static void
   12659 wm_put_swsm_semaphore(struct wm_softc *sc)
   12660 {
   12661 	uint32_t swsm;
   12662 
   12663 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12664 		device_xname(sc->sc_dev), __func__));
   12665 
   12666 	swsm = CSR_READ(sc, WMREG_SWSM);
   12667 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12668 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12669 }
   12670 
   12671 /*
   12672  * Get SW/FW semaphore.
   12673  * Same as e1000_acquire_swfw_sync_82575().
   12674  */
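/*
 * Layout note (an editor's sketch): SW_FW_SYNC carries one
 * software-owned and one firmware-owned bit per shared resource; "mask"
 * names the resource, and the shifted swmask/fwmask below must both be
 * clear before the software bit may be claimed.
 */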
   12675 static int
   12676 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12677 {
   12678 	uint32_t swfw_sync;
   12679 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12680 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12681 	int timeout = 200;
   12682 
   12683 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12684 		device_xname(sc->sc_dev), __func__));
   12685 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12686 
   12687 	for (timeout = 0; timeout < 200; timeout++) {
   12688 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12689 			if (wm_get_swsm_semaphore(sc)) {
   12690 				aprint_error_dev(sc->sc_dev,
   12691 				    "%s: failed to get semaphore\n",
   12692 				    __func__);
   12693 				return 1;
   12694 			}
   12695 		}
   12696 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12697 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12698 			swfw_sync |= swmask;
   12699 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12700 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12701 				wm_put_swsm_semaphore(sc);
   12702 			return 0;
   12703 		}
   12704 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12705 			wm_put_swsm_semaphore(sc);
   12706 		delay(5000);
   12707 	}
   12708 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12709 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12710 	return 1;
   12711 }
   12712 
   12713 static void
   12714 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12715 {
   12716 	uint32_t swfw_sync;
   12717 
   12718 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12719 		device_xname(sc->sc_dev), __func__));
   12720 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12721 
   12722 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12723 		while (wm_get_swsm_semaphore(sc) != 0)
   12724 			continue;
   12725 	}
   12726 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12727 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12728 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12729 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12730 		wm_put_swsm_semaphore(sc);
   12731 }
   12732 
   12733 static int
   12734 wm_get_phy_82575(struct wm_softc *sc)
   12735 {
   12736 
   12737 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12738 		device_xname(sc->sc_dev), __func__));
   12739 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12740 }
   12741 
   12742 static void
   12743 wm_put_phy_82575(struct wm_softc *sc)
   12744 {
   12745 
   12746 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12747 		device_xname(sc->sc_dev), __func__));
   12748 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12749 }
   12750 
   12751 static int
   12752 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12753 {
   12754 	uint32_t ext_ctrl;
   12755 	int timeout = 200;
   12756 
   12757 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12758 		device_xname(sc->sc_dev), __func__));
   12759 
   12760 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12761 	for (timeout = 0; timeout < 200; timeout++) {
   12762 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12763 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12764 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12765 
   12766 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12767 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12768 			return 0;
   12769 		delay(5000);
   12770 	}
   12771 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12772 	    device_xname(sc->sc_dev), ext_ctrl);
   12773 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12774 	return 1;
   12775 }
   12776 
   12777 static void
   12778 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12779 {
   12780 	uint32_t ext_ctrl;
   12781 
   12782 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12783 		device_xname(sc->sc_dev), __func__));
   12784 
   12785 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12786 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12787 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12788 
   12789 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12790 }
   12791 
   12792 static int
   12793 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12794 {
   12795 	uint32_t ext_ctrl;
   12796 	int timeout;
   12797 
   12798 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12799 		device_xname(sc->sc_dev), __func__));
   12800 	mutex_enter(sc->sc_ich_phymtx);
   12801 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12802 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12803 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12804 			break;
   12805 		delay(1000);
   12806 	}
   12807 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12808 		printf("%s: SW has already locked the resource\n",
   12809 		    device_xname(sc->sc_dev));
   12810 		goto out;
   12811 	}
   12812 
   12813 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12814 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12815 	for (timeout = 0; timeout < 1000; timeout++) {
   12816 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12817 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12818 			break;
   12819 		delay(1000);
   12820 	}
   12821 	if (timeout >= 1000) {
   12822 		printf("%s: failed to acquire semaphore\n",
   12823 		    device_xname(sc->sc_dev));
   12824 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12825 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12826 		goto out;
   12827 	}
   12828 	return 0;
   12829 
   12830 out:
   12831 	mutex_exit(sc->sc_ich_phymtx);
   12832 	return 1;
   12833 }
   12834 
   12835 static void
   12836 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12837 {
   12838 	uint32_t ext_ctrl;
   12839 
   12840 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12841 		device_xname(sc->sc_dev), __func__));
   12842 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12843 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12844 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12845 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12846 	} else {
   12847 		printf("%s: Semaphore unexpectedly released\n",
   12848 		    device_xname(sc->sc_dev));
   12849 	}
   12850 
   12851 	mutex_exit(sc->sc_ich_phymtx);
   12852 }
   12853 
   12854 static int
   12855 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12856 {
   12857 
   12858 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12859 		device_xname(sc->sc_dev), __func__));
   12860 	mutex_enter(sc->sc_ich_nvmmtx);
   12861 
   12862 	return 0;
   12863 }
   12864 
   12865 static void
   12866 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12867 {
   12868 
   12869 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12870 		device_xname(sc->sc_dev), __func__));
   12871 	mutex_exit(sc->sc_ich_nvmmtx);
   12872 }
   12873 
   12874 static int
   12875 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12876 {
   12877 	int i = 0;
   12878 	uint32_t reg;
   12879 
   12880 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12881 		device_xname(sc->sc_dev), __func__));
   12882 
   12883 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12884 	do {
   12885 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12886 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12887 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12888 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12889 			break;
   12890 		delay(2*1000);
   12891 		i++;
   12892 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12893 
   12894 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12895 		wm_put_hw_semaphore_82573(sc);
   12896 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12897 		    device_xname(sc->sc_dev));
   12898 		return -1;
   12899 	}
   12900 
   12901 	return 0;
   12902 }
   12903 
   12904 static void
   12905 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12906 {
   12907 	uint32_t reg;
   12908 
   12909 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12910 		device_xname(sc->sc_dev), __func__));
   12911 
   12912 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12913 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12914 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12915 }
   12916 
   12917 /*
   12918  * Management mode and power management related subroutines.
   12919  * BMC, AMT, suspend/resume and EEE.
   12920  */
   12921 
   12922 #ifdef WM_WOL
   12923 static int
   12924 wm_check_mng_mode(struct wm_softc *sc)
   12925 {
   12926 	int rv;
   12927 
   12928 	switch (sc->sc_type) {
   12929 	case WM_T_ICH8:
   12930 	case WM_T_ICH9:
   12931 	case WM_T_ICH10:
   12932 	case WM_T_PCH:
   12933 	case WM_T_PCH2:
   12934 	case WM_T_PCH_LPT:
   12935 	case WM_T_PCH_SPT:
   12936 		rv = wm_check_mng_mode_ich8lan(sc);
   12937 		break;
   12938 	case WM_T_82574:
   12939 	case WM_T_82583:
   12940 		rv = wm_check_mng_mode_82574(sc);
   12941 		break;
   12942 	case WM_T_82571:
   12943 	case WM_T_82572:
   12944 	case WM_T_82573:
   12945 	case WM_T_80003:
   12946 		rv = wm_check_mng_mode_generic(sc);
   12947 		break;
   12948 	default:
    12949 		/* nothing to do */
   12950 		rv = 0;
   12951 		break;
   12952 	}
   12953 
   12954 	return rv;
   12955 }
   12956 
   12957 static int
   12958 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12959 {
   12960 	uint32_t fwsm;
   12961 
   12962 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12963 
   12964 	if (((fwsm & FWSM_FW_VALID) != 0)
   12965 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12966 		return 1;
   12967 
   12968 	return 0;
   12969 }
   12970 
   12971 static int
   12972 wm_check_mng_mode_82574(struct wm_softc *sc)
   12973 {
   12974 	uint16_t data;
   12975 
   12976 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12977 
   12978 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12979 		return 1;
   12980 
   12981 	return 0;
   12982 }
   12983 
   12984 static int
   12985 wm_check_mng_mode_generic(struct wm_softc *sc)
   12986 {
   12987 	uint32_t fwsm;
   12988 
   12989 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12990 
   12991 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12992 		return 1;
   12993 
   12994 	return 0;
   12995 }
   12996 #endif /* WM_WOL */
   12997 
   12998 static int
   12999 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13000 {
   13001 	uint32_t manc, fwsm, factps;
   13002 
   13003 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13004 		return 0;
   13005 
   13006 	manc = CSR_READ(sc, WMREG_MANC);
   13007 
   13008 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13009 		device_xname(sc->sc_dev), manc));
   13010 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13011 		return 0;
   13012 
   13013 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13014 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13015 		factps = CSR_READ(sc, WMREG_FACTPS);
   13016 		if (((factps & FACTPS_MNGCG) == 0)
   13017 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13018 			return 1;
   13019 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13020 		uint16_t data;
   13021 
   13022 		factps = CSR_READ(sc, WMREG_FACTPS);
   13023 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13024 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13025 			device_xname(sc->sc_dev), factps, data));
   13026 		if (((factps & FACTPS_MNGCG) == 0)
   13027 		    && ((data & NVM_CFG2_MNGM_MASK)
   13028 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13029 			return 1;
   13030 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13031 	    && ((manc & MANC_ASF_EN) == 0))
   13032 		return 1;
   13033 
   13034 	return 0;
   13035 }
   13036 
   13037 static bool
   13038 wm_phy_resetisblocked(struct wm_softc *sc)
   13039 {
   13040 	bool blocked = false;
   13041 	uint32_t reg;
   13042 	int i = 0;
   13043 
   13044 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13045 		device_xname(sc->sc_dev), __func__));
   13046 
   13047 	switch (sc->sc_type) {
   13048 	case WM_T_ICH8:
   13049 	case WM_T_ICH9:
   13050 	case WM_T_ICH10:
   13051 	case WM_T_PCH:
   13052 	case WM_T_PCH2:
   13053 	case WM_T_PCH_LPT:
   13054 	case WM_T_PCH_SPT:
   13055 		do {
   13056 			reg = CSR_READ(sc, WMREG_FWSM);
   13057 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13058 				blocked = true;
   13059 				delay(10*1000);
   13060 				continue;
   13061 			}
   13062 			blocked = false;
   13063 		} while (blocked && (i++ < 30));
   13064 		return blocked;
   13066 	case WM_T_82571:
   13067 	case WM_T_82572:
   13068 	case WM_T_82573:
   13069 	case WM_T_82574:
   13070 	case WM_T_82583:
   13071 	case WM_T_80003:
   13072 		reg = CSR_READ(sc, WMREG_MANC);
   13073 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13074 			return true;
   13075 		else
   13076 			return false;
   13078 	default:
   13079 		/* no problem */
   13080 		break;
   13081 	}
   13082 
   13083 	return false;
   13084 }
   13085 
   13086 static void
   13087 wm_get_hw_control(struct wm_softc *sc)
   13088 {
   13089 	uint32_t reg;
   13090 
   13091 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13092 		device_xname(sc->sc_dev), __func__));
   13093 
   13094 	if (sc->sc_type == WM_T_82573) {
   13095 		reg = CSR_READ(sc, WMREG_SWSM);
   13096 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13097 	} else if (sc->sc_type >= WM_T_82571) {
   13098 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13099 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13100 	}
   13101 }
   13102 
   13103 static void
   13104 wm_release_hw_control(struct wm_softc *sc)
   13105 {
   13106 	uint32_t reg;
   13107 
   13108 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13109 		device_xname(sc->sc_dev), __func__));
   13110 
   13111 	if (sc->sc_type == WM_T_82573) {
   13112 		reg = CSR_READ(sc, WMREG_SWSM);
   13113 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13114 	} else if (sc->sc_type >= WM_T_82571) {
   13115 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13116 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13117 	}
   13118 }
   13119 
   13120 static void
   13121 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13122 {
   13123 	uint32_t reg;
   13124 
   13125 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13126 		device_xname(sc->sc_dev), __func__));
   13127 
   13128 	if (sc->sc_type < WM_T_PCH2)
   13129 		return;
   13130 
   13131 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13132 
   13133 	if (gate)
   13134 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13135 	else
   13136 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13137 
   13138 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13139 }
   13140 
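          /*
           * Switch the PHY from SMBus mode back to the MAC's MDIO interface.
           * The sequence below gates automatic PHY configuration, takes the
           * PHY out of ULP and, if it is still unreachable, forces SMBus mode
           * and/or toggles LANPHYPC to power-cycle the PHY before resetting it.
           */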
   13141 static void
   13142 wm_smbustopci(struct wm_softc *sc)
   13143 {
   13144 	uint32_t fwsm, reg;
   13145 	int rv = 0;
   13146 
   13147 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13148 		device_xname(sc->sc_dev), __func__));
   13149 
   13150 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13151 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13152 
   13153 	/* Disable ULP */
   13154 	wm_ulp_disable(sc);
   13155 
   13156 	/* Acquire PHY semaphore */
   13157 	sc->phy.acquire(sc);
   13158 
   13159 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13160 	switch (sc->sc_type) {
   13161 	case WM_T_PCH_LPT:
   13162 	case WM_T_PCH_SPT:
   13163 		if (wm_phy_is_accessible_pchlan(sc))
   13164 			break;
   13165 
   13166 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13167 		reg |= CTRL_EXT_FORCE_SMBUS;
   13168 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13169 #if 0
   13170 		/* XXX Isn't this required??? */
   13171 		CSR_WRITE_FLUSH(sc);
   13172 #endif
   13173 		delay(50 * 1000);
   13174 		/* FALLTHROUGH */
   13175 	case WM_T_PCH2:
   13176 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13177 			break;
   13178 		/* FALLTHROUGH */
   13179 	case WM_T_PCH:
   13180 		if (sc->sc_type == WM_T_PCH)
   13181 			if ((fwsm & FWSM_FW_VALID) != 0)
   13182 				break;
   13183 
   13184 		if (wm_phy_resetisblocked(sc) == true) {
   13185 			printf("XXX reset is blocked(3)\n");
   13186 			break;
   13187 		}
   13188 
   13189 		wm_toggle_lanphypc_pch_lpt(sc);
   13190 
   13191 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13192 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13193 				break;
   13194 
   13195 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13196 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13197 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13198 
   13199 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13200 				break;
   13201 			rv = -1;
   13202 		}
   13203 		break;
   13204 	default:
   13205 		break;
   13206 	}
   13207 
   13208 	/* Release semaphore */
   13209 	sc->phy.release(sc);
   13210 
   13211 	if (rv == 0) {
   13212 		if (wm_phy_resetisblocked(sc)) {
   13213 			printf("XXX reset is blocked(4)\n");
   13214 			goto out;
   13215 		}
   13216 		wm_reset_phy(sc);
   13217 		if (wm_phy_resetisblocked(sc))
    13218 			printf("XXX reset is blocked(5)\n");
   13219 	}
   13220 
   13221 out:
   13222 	/*
   13223 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13224 	 */
   13225 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13226 		delay(10*1000);
   13227 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13228 	}
   13229 }
   13230 
   13231 static void
   13232 wm_init_manageability(struct wm_softc *sc)
   13233 {
   13234 
   13235 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13236 		device_xname(sc->sc_dev), __func__));
   13237 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13238 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13239 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13240 
   13241 		/* Disable hardware interception of ARP */
   13242 		manc &= ~MANC_ARP_EN;
   13243 
   13244 		/* Enable receiving management packets to the host */
   13245 		if (sc->sc_type >= WM_T_82571) {
   13246 			manc |= MANC_EN_MNG2HOST;
    13247 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13248 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13249 		}
   13250 
   13251 		CSR_WRITE(sc, WMREG_MANC, manc);
   13252 	}
   13253 }
   13254 
   13255 static void
   13256 wm_release_manageability(struct wm_softc *sc)
   13257 {
   13258 
   13259 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13260 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13261 
   13262 		manc |= MANC_ARP_EN;
   13263 		if (sc->sc_type >= WM_T_82571)
   13264 			manc &= ~MANC_EN_MNG2HOST;
   13265 
   13266 		CSR_WRITE(sc, WMREG_MANC, manc);
   13267 	}
   13268 }
   13269 
   13270 static void
   13271 wm_get_wakeup(struct wm_softc *sc)
   13272 {
   13273 
   13274 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13275 	switch (sc->sc_type) {
   13276 	case WM_T_82573:
   13277 	case WM_T_82583:
   13278 		sc->sc_flags |= WM_F_HAS_AMT;
   13279 		/* FALLTHROUGH */
   13280 	case WM_T_80003:
   13281 	case WM_T_82575:
   13282 	case WM_T_82576:
   13283 	case WM_T_82580:
   13284 	case WM_T_I350:
   13285 	case WM_T_I354:
   13286 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13287 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13288 		/* FALLTHROUGH */
   13289 	case WM_T_82541:
   13290 	case WM_T_82541_2:
   13291 	case WM_T_82547:
   13292 	case WM_T_82547_2:
   13293 	case WM_T_82571:
   13294 	case WM_T_82572:
   13295 	case WM_T_82574:
   13296 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13297 		break;
   13298 	case WM_T_ICH8:
   13299 	case WM_T_ICH9:
   13300 	case WM_T_ICH10:
   13301 	case WM_T_PCH:
   13302 	case WM_T_PCH2:
   13303 	case WM_T_PCH_LPT:
   13304 	case WM_T_PCH_SPT:
   13305 		sc->sc_flags |= WM_F_HAS_AMT;
   13306 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13307 		break;
   13308 	default:
   13309 		break;
   13310 	}
   13311 
   13312 	/* 1: HAS_MANAGE */
   13313 	if (wm_enable_mng_pass_thru(sc) != 0)
   13314 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13315 
   13316 	/*
    13317 	 * Note that the WOL flag is set after the EEPROM settings have
    13318 	 * been reset.
   13319 	 */
   13320 }
   13321 
   13322 /*
   13323  * Unconfigure Ultra Low Power mode.
    13324  * Only for PCH_LPT and newer; some I217/I218 parts are excluded below.
   13325  */
   13326 static void
   13327 wm_ulp_disable(struct wm_softc *sc)
   13328 {
   13329 	uint32_t reg;
   13330 	int i = 0;
   13331 
   13332 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13333 		device_xname(sc->sc_dev), __func__));
    13334 	/* Exclude devices that have no ULP support */
   13335 	if ((sc->sc_type < WM_T_PCH_LPT)
   13336 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13337 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13338 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13339 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13340 		return;
   13341 
   13342 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13343 		/* Request ME un-configure ULP mode in the PHY */
   13344 		reg = CSR_READ(sc, WMREG_H2ME);
   13345 		reg &= ~H2ME_ULP;
   13346 		reg |= H2ME_ENFORCE_SETTINGS;
   13347 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13348 
   13349 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13350 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13351 			if (i++ == 30) {
   13352 				printf("%s timed out\n", __func__);
   13353 				return;
   13354 			}
   13355 			delay(10 * 1000);
   13356 		}
   13357 		reg = CSR_READ(sc, WMREG_H2ME);
   13358 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13359 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13360 
   13361 		return;
   13362 	}
   13363 
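          	/*
          	 * No ME firmware is running, so the driver itself has to take
          	 * the PHY out of ULP by toggling LANPHYPC and clearing the
          	 * I218 ULP configuration registers.
          	 */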
   13364 	/* Acquire semaphore */
   13365 	sc->phy.acquire(sc);
   13366 
   13367 	/* Toggle LANPHYPC */
   13368 	wm_toggle_lanphypc_pch_lpt(sc);
   13369 
   13370 	/* Unforce SMBus mode in PHY */
   13371 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13372 	if (reg == 0x0000 || reg == 0xffff) {
   13373 		uint32_t reg2;
   13374 
   13375 		printf("%s: Force SMBus first.\n", __func__);
   13376 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13377 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13378 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13379 		delay(50 * 1000);
   13380 
   13381 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13382 	}
   13383 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13384 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13385 
   13386 	/* Unforce SMBus mode in MAC */
   13387 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13388 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13389 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13390 
   13391 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13392 	reg |= HV_PM_CTRL_K1_ENA;
   13393 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13394 
   13395 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13396 	reg &= ~(I218_ULP_CONFIG1_IND
   13397 	    | I218_ULP_CONFIG1_STICKY_ULP
   13398 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13399 	    | I218_ULP_CONFIG1_WOL_HOST
   13400 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13401 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13402 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13403 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13404 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13405 	reg |= I218_ULP_CONFIG1_START;
   13406 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13407 
   13408 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13409 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13410 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13411 
   13412 	/* Release semaphore */
   13413 	sc->phy.release(sc);
   13414 	wm_gmii_reset(sc);
   13415 	delay(50 * 1000);
   13416 }
   13417 
   13418 /* WOL in the newer chipset interfaces (pchlan) */
   13419 static void
   13420 wm_enable_phy_wakeup(struct wm_softc *sc)
   13421 {
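          	/* XXX Not implemented yet; the outline below is still a TODO. */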
   13422 #if 0
   13423 	uint16_t preg;
   13424 
   13425 	/* Copy MAC RARs to PHY RARs */
   13426 
   13427 	/* Copy MAC MTA to PHY MTA */
   13428 
   13429 	/* Configure PHY Rx Control register */
   13430 
   13431 	/* Enable PHY wakeup in MAC register */
   13432 
   13433 	/* Configure and enable PHY wakeup in PHY registers */
   13434 
   13435 	/* Activate PHY wakeup */
   13436 
   13437 	/* XXX */
   13438 #endif
   13439 }
   13440 
   13441 /* Power down workaround on D3 */
   13442 static void
   13443 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13444 {
   13445 	uint32_t reg;
   13446 	int i;
   13447 
   13448 	for (i = 0; i < 2; i++) {
   13449 		/* Disable link */
   13450 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13451 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13452 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13453 
   13454 		/*
   13455 		 * Call gig speed drop workaround on Gig disable before
   13456 		 * accessing any PHY registers
   13457 		 */
   13458 		if (sc->sc_type == WM_T_ICH8)
   13459 			wm_gig_downshift_workaround_ich8lan(sc);
   13460 
   13461 		/* Write VR power-down enable */
   13462 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13463 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13464 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13465 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13466 
   13467 		/* Read it back and test */
   13468 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13469 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13470 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13471 			break;
   13472 
   13473 		/* Issue PHY reset and repeat at most one more time */
   13474 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13475 	}
   13476 }
   13477 
   13478 static void
   13479 wm_enable_wakeup(struct wm_softc *sc)
   13480 {
   13481 	uint32_t reg, pmreg;
   13482 	pcireg_t pmode;
   13483 
   13484 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13485 		device_xname(sc->sc_dev), __func__));
   13486 
   13487 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13488 		&pmreg, NULL) == 0)
   13489 		return;
   13490 
   13491 	/* Advertise the wakeup capability */
   13492 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13493 	    | CTRL_SWDPIN(3));
   13494 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13495 
   13496 	/* ICH workaround */
   13497 	switch (sc->sc_type) {
   13498 	case WM_T_ICH8:
   13499 	case WM_T_ICH9:
   13500 	case WM_T_ICH10:
   13501 	case WM_T_PCH:
   13502 	case WM_T_PCH2:
   13503 	case WM_T_PCH_LPT:
   13504 	case WM_T_PCH_SPT:
   13505 		/* Disable gig during WOL */
   13506 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13507 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13508 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13509 		if (sc->sc_type == WM_T_PCH)
   13510 			wm_gmii_reset(sc);
   13511 
   13512 		/* Power down workaround */
   13513 		if (sc->sc_phytype == WMPHY_82577) {
   13514 			struct mii_softc *child;
   13515 
   13516 			/* Assume that the PHY is copper */
   13517 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13518 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13519 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13520 				    (768 << 5) | 25, 0x0444); /* magic num */
   13521 		}
   13522 		break;
   13523 	default:
   13524 		break;
   13525 	}
   13526 
   13527 	/* Keep the laser running on fiber adapters */
   13528 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13529 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13530 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13531 		reg |= CTRL_EXT_SWDPIN(3);
   13532 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13533 	}
   13534 
   13535 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    13536 #if 0	/* for multicast packets */
   13537 	reg |= WUFC_MC;
   13538 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13539 #endif
   13540 
   13541 	if (sc->sc_type >= WM_T_PCH)
   13542 		wm_enable_phy_wakeup(sc);
   13543 	else {
   13544 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13545 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13546 	}
   13547 
   13548 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13549 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13550 		|| (sc->sc_type == WM_T_PCH2))
   13551 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13552 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13553 
   13554 	/* Request PME */
   13555 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13556 #if 0
   13557 	/* Disable WOL */
   13558 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13559 #else
   13560 	/* For WOL */
   13561 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13562 #endif
   13563 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13564 }
   13565 
   13566 /* LPLU */
   13567 
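          /*
           * Disable D0 Low Power Link Up, keeping full link speed available
           * while in D0.  The knob differs per family: the IGP PHY power
           * management register on 82571-82576, PHPM on 82580/I350/I21x,
           * PHY_CTRL on 82574/82583 and ICH8-10, and HV_OEM_BITS on PCH.
           */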
   13568 static void
   13569 wm_lplu_d0_disable(struct wm_softc *sc)
   13570 {
   13571 	struct mii_data *mii = &sc->sc_mii;
   13572 	uint32_t reg;
   13573 
   13574 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13575 		device_xname(sc->sc_dev), __func__));
   13576 
   13577 	if (sc->sc_phytype == WMPHY_IFE)
   13578 		return;
   13579 
   13580 	switch (sc->sc_type) {
   13581 	case WM_T_82571:
   13582 	case WM_T_82572:
   13583 	case WM_T_82573:
   13584 	case WM_T_82575:
   13585 	case WM_T_82576:
   13586 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13587 		reg &= ~PMR_D0_LPLU;
   13588 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13589 		break;
   13590 	case WM_T_82580:
   13591 	case WM_T_I350:
   13592 	case WM_T_I210:
   13593 	case WM_T_I211:
   13594 		reg = CSR_READ(sc, WMREG_PHPM);
   13595 		reg &= ~PHPM_D0A_LPLU;
   13596 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13597 		break;
   13598 	case WM_T_82574:
   13599 	case WM_T_82583:
   13600 	case WM_T_ICH8:
   13601 	case WM_T_ICH9:
   13602 	case WM_T_ICH10:
   13603 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13604 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13605 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13606 		CSR_WRITE_FLUSH(sc);
   13607 		break;
   13608 	case WM_T_PCH:
   13609 	case WM_T_PCH2:
   13610 	case WM_T_PCH_LPT:
   13611 	case WM_T_PCH_SPT:
   13612 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13613 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13614 		if (wm_phy_resetisblocked(sc) == false)
   13615 			reg |= HV_OEM_BITS_ANEGNOW;
   13616 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13617 		break;
   13618 	default:
   13619 		break;
   13620 	}
   13621 }
   13622 
   13623 /* EEE */
   13624 
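          /*
           * Enable or disable Energy Efficient Ethernet on I350-class parts:
           * when WM_F_EEE is set, advertise 1G/100M EEE and enable LPI;
           * otherwise clear the advertisement and LPI bits.
           */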
   13625 static void
   13626 wm_set_eee_i350(struct wm_softc *sc)
   13627 {
   13628 	uint32_t ipcnfg, eeer;
   13629 
   13630 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13631 	eeer = CSR_READ(sc, WMREG_EEER);
   13632 
   13633 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13634 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13635 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13636 		    | EEER_LPI_FC);
   13637 	} else {
   13638 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13639 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13640 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13641 		    | EEER_LPI_FC);
   13642 	}
   13643 
   13644 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13645 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13646 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13647 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13648 }
   13649 
   13650 /*
   13651  * Workarounds (mainly PHY related).
    13652  * Most PHY workarounds live in the PHY drivers; the ones here need
    13652  * MAC register access as well.
   13653  */
   13654 
   13655 /* Work-around for 82566 Kumeran PCS lock loss */
   13656 static void
   13657 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13658 {
   13659 	struct mii_data *mii = &sc->sc_mii;
   13660 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13661 	int i;
   13662 	int reg;
   13663 
   13664 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13665 		device_xname(sc->sc_dev), __func__));
   13666 
   13667 	/* If the link is not up, do nothing */
   13668 	if ((status & STATUS_LU) == 0)
   13669 		return;
   13670 
   13671 	/* Nothing to do if the link is other than 1Gbps */
   13672 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13673 		return;
   13674 
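          	/*
          	 * Poll the IGP3 Kumeran diagnostic register (reading it twice
          	 * per attempt) up to ten times, resetting the PHY between
          	 * attempts; if the PCS lock loss persists, fall through and
          	 * disable gigabit negotiation entirely.
          	 */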
   13676 	for (i = 0; i < 10; i++) {
   13677 		/* read twice */
   13678 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13679 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13680 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13681 			goto out;	/* GOOD! */
   13682 
   13683 		/* Reset the PHY */
   13684 		wm_reset_phy(sc);
   13685 		delay(5*1000);
   13686 	}
   13687 
   13688 	/* Disable GigE link negotiation */
   13689 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13690 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13691 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13692 
   13693 	/*
   13694 	 * Call gig speed drop workaround on Gig disable before accessing
   13695 	 * any PHY registers.
   13696 	 */
   13697 	wm_gig_downshift_workaround_ich8lan(sc);
   13698 
   13699 out:
   13700 	return;
   13701 }
   13702 
   13703 /* WOL from S5 stops working */
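          /*
           * The workaround momentarily sets and then clears the Kumeran
           * near-end loopback bit; the callers run it whenever gigabit is
           * being disabled on an IGP3 PHY.
           */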
   13704 static void
   13705 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13706 {
   13707 	uint16_t kmrn_reg;
   13708 
   13709 	/* Only for igp3 */
   13710 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13711 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13712 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13713 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13714 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13715 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13716 	}
   13717 }
   13718 
   13719 /*
    13720  * Workarounds for the PCH PHYs (82577/82578)
   13721  * XXX should be moved to new PHY driver?
   13722  */
   13723 static void
   13724 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13725 {
   13726 
   13727 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13728 		device_xname(sc->sc_dev), __func__));
   13729 	KASSERT(sc->sc_type == WM_T_PCH);
   13730 
   13731 	if (sc->sc_phytype == WMPHY_82577)
   13732 		wm_set_mdio_slow_mode_hv(sc);
   13733 
   13734 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13735 
    13736 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13737 
   13738 	/* 82578 */
   13739 	if (sc->sc_phytype == WMPHY_82578) {
   13740 		struct mii_softc *child;
   13741 
   13742 		/*
   13743 		 * Return registers to default by doing a soft reset then
   13744 		 * writing 0x3140 to the control register
   13745 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13746 		 */
   13747 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13748 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13749 			PHY_RESET(child);
   13750 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13751 			    0x3140);
   13752 		}
   13753 	}
   13754 
   13755 	/* Select page 0 */
   13756 	sc->phy.acquire(sc);
   13757 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13758 	sc->phy.release(sc);
   13759 
   13760 	/*
   13761 	 * Configure the K1 Si workaround during phy reset assuming there is
   13762 	 * link so that it disables K1 if link is in 1Gbps.
   13763 	 */
   13764 	wm_k1_gig_workaround_hv(sc, 1);
   13765 }
   13766 
   13767 static void
   13768 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13769 {
   13770 
   13771 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13772 		device_xname(sc->sc_dev), __func__));
   13773 	KASSERT(sc->sc_type == WM_T_PCH2);
   13774 
   13775 	wm_set_mdio_slow_mode_hv(sc);
   13776 }
   13777 
   13778 static int
   13779 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13780 {
   13781 	int k1_enable = sc->sc_nvm_k1_enabled;
   13782 
   13783 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13784 		device_xname(sc->sc_dev), __func__));
   13785 
   13786 	if (sc->phy.acquire(sc) != 0)
   13787 		return -1;
   13788 
   13789 	if (link) {
   13790 		k1_enable = 0;
   13791 
   13792 		/* Link stall fix for link up */
   13793 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13794 	} else {
   13795 		/* Link stall fix for link down */
   13796 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13797 	}
   13798 
   13799 	wm_configure_k1_ich8lan(sc, k1_enable);
   13800 	sc->phy.release(sc);
   13801 
   13802 	return 0;
   13803 }
   13804 
   13805 static void
   13806 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13807 {
   13808 	uint32_t reg;
   13809 
   13810 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13811 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13812 	    reg | HV_KMRN_MDIO_SLOW);
   13813 }
   13814 
   13815 static void
   13816 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13817 {
   13818 	uint32_t ctrl, ctrl_ext, tmp;
   13819 	uint16_t kmrn_reg;
   13820 
   13821 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13822 
   13823 	if (k1_enable)
   13824 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13825 	else
   13826 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13827 
   13828 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13829 
   13830 	delay(20);
   13831 
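          	/*
          	 * Briefly force the MAC speed with SPD_BYPS set, seemingly so
          	 * the Kumeran interface latches the new K1 setting, and then
          	 * restore the original CTRL/CTRL_EXT values.
          	 */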
   13832 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13833 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13834 
   13835 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13836 	tmp |= CTRL_FRCSPD;
   13837 
   13838 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13839 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13840 	CSR_WRITE_FLUSH(sc);
   13841 	delay(20);
   13842 
   13843 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13844 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13845 	CSR_WRITE_FLUSH(sc);
   13846 	delay(20);
   13847 }
   13848 
    13849 /* Special case - the 82575 needs manual init after reset ... */
   13850 static void
   13851 wm_reset_init_script_82575(struct wm_softc *sc)
   13852 {
   13853 	/*
    13854 	 * Remark: this is untested code - we have no board without EEPROM;
    13855 	 * same setup as mentioned in the FreeBSD driver for the i82575.
   13856 	 */
   13857 
   13858 	/* SerDes configuration via SERDESCTRL */
   13859 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13860 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13861 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13862 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13863 
   13864 	/* CCM configuration via CCMCTL register */
   13865 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13866 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13867 
   13868 	/* PCIe lanes configuration */
   13869 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13870 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13871 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13872 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13873 
   13874 	/* PCIe PLL Configuration */
   13875 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13876 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13877 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13878 }
   13879 
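          /*
           * Re-derive the external/shared MDIO bits in MDICNFG from this
           * port's CFG3 NVM word; only relevant when the port is strapped
           * for SGMII.
           */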
   13880 static void
   13881 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13882 {
   13883 	uint32_t reg;
   13884 	uint16_t nvmword;
   13885 	int rv;
   13886 
   13887 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13888 		return;
   13889 
   13890 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13891 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13892 	if (rv != 0) {
   13893 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13894 		    __func__);
   13895 		return;
   13896 	}
   13897 
   13898 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13899 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13900 		reg |= MDICNFG_DEST;
   13901 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13902 		reg |= MDICNFG_COM_MDIO;
   13903 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13904 }
   13905 
   13906 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13907 
   13908 static bool
   13909 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13910 {
   13911 	int i;
   13912 	uint32_t reg;
   13913 	uint16_t id1, id2;
   13914 
   13915 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13916 		device_xname(sc->sc_dev), __func__));
   13917 	id1 = id2 = 0xffff;
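          	/*
          	 * Try the PHY ID registers up to twice; while the PHY is still
          	 * stuck in SMBus mode the reads can come back as all 0s or
          	 * all 1s.
          	 */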
   13918 	for (i = 0; i < 2; i++) {
   13919 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13920 		if (MII_INVALIDID(id1))
   13921 			continue;
   13922 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13923 		if (MII_INVALIDID(id2))
   13924 			continue;
   13925 		break;
   13926 	}
   13927 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   13928 		goto out;
   13929 	}
   13930 
   13931 	if (sc->sc_type < WM_T_PCH_LPT) {
   13932 		sc->phy.release(sc);
   13933 		wm_set_mdio_slow_mode_hv(sc);
   13934 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13935 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13936 		sc->phy.acquire(sc);
   13937 	}
   13938 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   13939 		printf("XXX return with false\n");
   13940 		return false;
   13941 	}
   13942 out:
   13943 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13944 		/* Only unforce SMBus if ME is not active */
   13945 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13946 			/* Unforce SMBus mode in PHY */
   13947 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13948 			    CV_SMB_CTRL);
   13949 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13950 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13951 			    CV_SMB_CTRL, reg);
   13952 
   13953 			/* Unforce SMBus mode in MAC */
   13954 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13955 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13956 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13957 		}
   13958 	}
   13959 	return true;
   13960 }
   13961 
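          /*
           * Toggle the LANPHYPC pin to power-cycle the PHY.  The pin is
           * driven low under software override for about 1 ms and then
           * released; on PCH_LPT and newer, completion is signalled via
           * CTRL_EXT_LPCD, while older parts simply get a fixed 50 ms wait.
           */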
   13962 static void
   13963 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13964 {
   13965 	uint32_t reg;
   13966 	int i;
   13967 
   13968 	/* Set PHY Config Counter to 50msec */
   13969 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13970 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13971 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13972 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13973 
   13974 	/* Toggle LANPHYPC */
   13975 	reg = CSR_READ(sc, WMREG_CTRL);
   13976 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13977 	reg &= ~CTRL_LANPHYPC_VALUE;
   13978 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13979 	CSR_WRITE_FLUSH(sc);
   13980 	delay(1000);
   13981 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13982 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13983 	CSR_WRITE_FLUSH(sc);
   13984 
   13985 	if (sc->sc_type < WM_T_PCH_LPT)
   13986 		delay(50 * 1000);
   13987 	else {
   13988 		i = 20;
   13989 
   13990 		do {
   13991 			delay(5 * 1000);
   13992 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13993 		    && i--);
   13994 
   13995 		delay(30 * 1000);
   13996 	}
   13997 }
   13998 
   13999 static int
   14000 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14001 {
   14002 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14003 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14004 	uint32_t rxa;
   14005 	uint16_t scale = 0, lat_enc = 0;
   14006 	int32_t obff_hwm = 0;
   14007 	int64_t lat_ns, value;
   14008 
   14009 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14010 		device_xname(sc->sc_dev), __func__));
   14011 
   14012 	if (link) {
   14013 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14014 		uint32_t status;
   14015 		uint16_t speed;
   14016 		pcireg_t preg;
   14017 
   14018 		status = CSR_READ(sc, WMREG_STATUS);
   14019 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14020 		case STATUS_SPEED_10:
   14021 			speed = 10;
   14022 			break;
   14023 		case STATUS_SPEED_100:
   14024 			speed = 100;
   14025 			break;
   14026 		case STATUS_SPEED_1000:
   14027 			speed = 1000;
   14028 			break;
   14029 		default:
   14030 			device_printf(sc->sc_dev, "Unknown speed "
   14031 			    "(status = %08x)\n", status);
   14032 			return -1;
   14033 		}
   14034 
   14035 		/* Rx Packet Buffer Allocation size (KB) */
   14036 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14037 
   14038 		/*
   14039 		 * Determine the maximum latency tolerated by the device.
   14040 		 *
   14041 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14042 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14043 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14044 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14045 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14046 		 */
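          		/*
          		 * Worked example (illustrative): for lat_ns = 100000 the
          		 * value does not fit in 10 bits, so it is divided by 2^5
          		 * (rounding up) until it does: 100000 -> 3125 -> 98, with
          		 * scale = 2.  __SHIFTIN(2, LTRV_SCALE) | 98 then decodes
          		 * back to 98 * 2^10 ns (~100.4 us), just above the
          		 * requested latency, as round-up demands.
          		 */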
   14047 		lat_ns = ((int64_t)rxa * 1024 -
   14048 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14049 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14050 		if (lat_ns < 0)
   14051 			lat_ns = 0;
   14052 		else
   14053 			lat_ns /= speed;
   14054 		value = lat_ns;
   14055 
   14056 		while (value > LTRV_VALUE) {
    14057 			scale++;
   14058 			value = howmany(value, __BIT(5));
   14059 		}
   14060 		if (scale > LTRV_SCALE_MAX) {
   14061 			printf("%s: Invalid LTR latency scale %d\n",
   14062 			    device_xname(sc->sc_dev), scale);
   14063 			return -1;
   14064 		}
   14065 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14066 
   14067 		/* Determine the maximum latency tolerated by the platform */
   14068 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14069 		    WM_PCI_LTR_CAP_LPT);
   14070 		max_snoop = preg & 0xffff;
   14071 		max_nosnoop = preg >> 16;
   14072 
   14073 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14074 
   14075 		if (lat_enc > max_ltr_enc) {
   14076 			lat_enc = max_ltr_enc;
   14077 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14078 			    * PCI_LTR_SCALETONS(
   14079 				    __SHIFTOUT(lat_enc,
   14080 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14081 		}
   14082 
   14083 		if (lat_ns) {
   14084 			lat_ns *= speed * 1000;
   14085 			lat_ns /= 8;
   14086 			lat_ns /= 1000000000;
   14087 			obff_hwm = (int32_t)(rxa - lat_ns);
   14088 		}
   14089 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    14090 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   14091 			    "(rxa = %d, lat_ns = %d)\n",
   14092 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14093 			return -1;
   14094 		}
   14095 	}
   14096 	/* Snoop and No-Snoop latencies the same */
   14097 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14098 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14099 
   14100 	/* Set OBFF high water mark */
   14101 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14102 	reg |= obff_hwm;
   14103 	CSR_WRITE(sc, WMREG_SVT, reg);
   14104 
   14105 	/* Enable OBFF */
   14106 	reg = CSR_READ(sc, WMREG_SVCR);
   14107 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14108 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14109 
   14110 	return 0;
   14111 }
   14112 
   14113 /*
   14114  * I210 Errata 25 and I211 Errata 10
   14115  * Slow System Clock.
   14116  */
   14117 static void
   14118 wm_pll_workaround_i210(struct wm_softc *sc)
   14119 {
   14120 	uint32_t mdicnfg, wuc;
   14121 	uint32_t reg;
   14122 	pcireg_t pcireg;
   14123 	uint32_t pmreg;
   14124 	uint16_t nvmword, tmp_nvmword;
   14125 	int phyval;
   14126 	bool wa_done = false;
   14127 	int i;
   14128 
   14129 	/* Save WUC and MDICNFG registers */
   14130 	wuc = CSR_READ(sc, WMREG_WUC);
   14131 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14132 
   14133 	reg = mdicnfg & ~MDICNFG_DEST;
   14134 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14135 
   14136 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14137 		nvmword = INVM_DEFAULT_AL;
   14138 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14139 
   14140 	/* Get Power Management cap offset */
    14141 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
    14142 		&pmreg, NULL) == 0) {
          		/* Restore MDICNFG before bailing out */
          		CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
          		return;
          	}
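          	/*
          	 * Check whether the PHY PLL locked; if GS40G_PHY_PLL_UNCONF is
          	 * still set, reset the internal PHY with a temporary NVM
          	 * autoload override and bounce the device through D3hot,
          	 * retrying up to WM_MAX_PLL_TRIES times.
          	 */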
   14144 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14145 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14146 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14147 
   14148 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14149 			break; /* OK */
   14150 		}
   14151 
   14152 		wa_done = true;
   14153 		/* Directly reset the internal PHY */
   14154 		reg = CSR_READ(sc, WMREG_CTRL);
   14155 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14156 
   14157 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14158 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14159 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14160 
   14161 		CSR_WRITE(sc, WMREG_WUC, 0);
   14162 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14163 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14164 
   14165 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14166 		    pmreg + PCI_PMCSR);
   14167 		pcireg |= PCI_PMCSR_STATE_D3;
   14168 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14169 		    pmreg + PCI_PMCSR, pcireg);
   14170 		delay(1000);
   14171 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14172 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14173 		    pmreg + PCI_PMCSR, pcireg);
   14174 
   14175 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14176 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14177 
   14178 		/* Restore WUC register */
   14179 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14180 	}
   14181 
   14182 	/* Restore MDICNFG setting */
   14183 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14184 	if (wa_done)
   14185 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14186 }
   14187 
   14188 static void
   14189 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14190 {
   14191 	uint32_t reg;
   14192 
   14193 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14194 		device_xname(sc->sc_dev), __func__));
   14195 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14196 
   14197 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14198 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14199 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14200 
   14201 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14202 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14203 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14204 }
   14205