/*	$NetBSD: if_wm.c,v 1.501 2017/03/24 09:59:05 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy-Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.501 2017/03/24 09:59:05 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
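
/*
 * Usage example (a sketch, not copied from any one call site): the second
 * DPRINTF() argument is a fully parenthesized printf() argument list,
 * which is how this pre-C99 macro forwards a variable number of arguments:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */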

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts that this driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
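
/* I.e. up to 16 Tx/Rx queue interrupt vectors plus one link interrupt. */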
    175 
    176 /*
    177  * Transmit descriptor list size.  Due to errata, we can only have
    178  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    179  * on >= 82544.  We tell the upper layers that they can queue a lot
    180  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    181  * of them at a time.
    182  *
    183  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    184  * chains containing many small mbufs have been observed in zero-copy
    185  * situations with jumbo frames.
    186  */
    187 #define	WM_NTXSEGS		256
    188 #define	WM_IFQUEUELEN		256
    189 #define	WM_TXQUEUELEN_MAX	64
    190 #define	WM_TXQUEUELEN_MAX_82547	16
    191 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    192 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    193 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    194 #define	WM_NTXDESC_82542	256
    195 #define	WM_NTXDESC_82544	4096
    196 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    197 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    198 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    199 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    200 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
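
/*
 * Both ring sizes must be powers of two for the masks above to work:
 * "(x + 1) & (n - 1)" is then equivalent to "(x + 1) % n", so
 * WM_NEXTTX()/WM_NEXTTXS() are branch-free modular increments.  E.g. with
 * 4096 descriptors, WM_NEXTTX(txq, 4095) wraps back to 0.
 */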

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
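
/*
 * For illustration: with 4k pages, round_page(IP_MAXPACKET) ==
 * round_page(65535) == 65536, so WM_MAXTXDMA is 128k, enough DMA window
 * for a full TSO payload plus headers.
 */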

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
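/*
 * Arithmetic behind the figures above, assuming a ~9k jumbo MTU: a
 * 9018-byte frame needs roundup(9018 / 2048) = 5 clusters, and
 * 256 descriptors / 5 buffers per packet ~= 50 full jumbo packets.
 */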
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
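
/*
 * The unions above let a single DMA allocation back whichever descriptor
 * layout the chip generation uses; the layout in effect is recorded at
 * attach time in txq_descsize/rxq_descsize, which is why WM_CDTXOFF()/
 * WM_CDRXOFF() scale by the run-time descriptor size rather than a
 * compile-time sizeof.
 */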

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
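
/*
 * One software/firmware PHY semaphore ID per LAN function; the table is
 * indexed by sc_funcid (0-3) when the swfw semaphore routines below need
 * to lock this port's PHY.
 */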

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
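
/*
 * For illustration, WM_Q_EVCNT_DEFINE(txq, txdw) expands to
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * ("##" is not pasted inside a string literal, so the sizeof is taken over
 * that 17-character literal, leaving an 18-byte buffer).
 * WM_Q_EVCNT_ATTACH(txq, txdw, txq, 0, xname, ...) then formats "txq00txdw"
 * into that buffer and attaches the counter under that name.
 */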

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum)		/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum)		/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};
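
/*
 * Note (illustrative, based on how this driver wires things up at attach
 * time): phy.acquire/phy.release point at the wm_get_*()/wm_put_*()
 * semaphore pair appropriate to the chip generation (see "Hardware
 * semaphores" below), so PHY access code can take the right lock without
 * chip-specific knowledge.
 */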

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define	WM_CORE_LOCK(_sc)						\
	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
#define	WM_CORE_UNLOCK(_sc)						\
	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
#define	WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
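
/*
 * sc_core_lock may legitimately be NULL (the !WM_MPSAFE configuration),
 * so the wrappers above degrade to no-ops in that case, and
 * WM_CORE_LOCKED() then always reports true so KASSERT-style checks
 * remain valid.
 */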

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
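
/*
 * Worked example: after WM_RXCHAIN_RESET(rxq), linking m1 then m2 with
 * WM_RXCHAIN_LINK() leaves rxq_head == m1, m1->m_next == m2 and
 * rxq_tailp == &m2->m_next, so appending the next fragment is always O(1)
 * regardless of chain length.
 */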

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
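
/*
 * The _LO/_HI pairs split a descriptor's bus address into the two 32-bit
 * halves that the chip's 64-bit base address register pairs (e.g.
 * TDBAL/TDBAH, RDBAL/RDBAH) expect; when bus_addr_t is 32 bits wide the
 * high half is constant-folded to 0.
 */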

/*
 * Register read/write functions, other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/ or w/o EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1331 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1332 	  WM_T_82580,		WMP_F_COPPER },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1335 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1336 	  WM_T_82580,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1339 	  "DH89XXCC 1000BASE-KX Ethernet",
   1340 	  WM_T_82580,		WMP_F_SERDES },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1343 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1344 	  WM_T_82580,		WMP_F_SERDES },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1347 	  "I350 Gigabit Network Connection",
   1348 	  WM_T_I350,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1351 	  "I350 Gigabit Fiber Network Connection",
   1352 	  WM_T_I350,		WMP_F_FIBER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1355 	  "I350 Gigabit Backplane Connection",
   1356 	  WM_T_I350,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1359 	  "I350 Quad Port Gigabit Ethernet",
   1360 	  WM_T_I350,		WMP_F_SERDES },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1363 	  "I350 Gigabit Connection",
   1364 	  WM_T_I350,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1367 	  "I354 Gigabit Ethernet (KX)",
   1368 	  WM_T_I354,		WMP_F_SERDES },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1371 	  "I354 Gigabit Ethernet (SGMII)",
   1372 	  WM_T_I354,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1375 	  "I354 Gigabit Ethernet (2.5G)",
   1376 	  WM_T_I354,		WMP_F_COPPER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1379 	  "I210-T1 Ethernet Server Adapter",
   1380 	  WM_T_I210,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1383 	  "I210 Ethernet (Copper OEM)",
   1384 	  WM_T_I210,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1387 	  "I210 Ethernet (Copper IT)",
   1388 	  WM_T_I210,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1391 	  "I210 Ethernet (FLASH less)",
   1392 	  WM_T_I210,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1395 	  "I210 Gigabit Ethernet (Fiber)",
   1396 	  WM_T_I210,		WMP_F_FIBER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1399 	  "I210 Gigabit Ethernet (SERDES)",
   1400 	  WM_T_I210,		WMP_F_SERDES },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1403 	  "I210 Gigabit Ethernet (FLASH less)",
   1404 	  WM_T_I210,		WMP_F_SERDES },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1407 	  "I210 Gigabit Ethernet (SGMII)",
   1408 	  WM_T_I210,		WMP_F_COPPER },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1411 	  "I211 Ethernet (COPPER)",
   1412 	  WM_T_I211,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1414 	  "I217 V Ethernet Connection",
   1415 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1417 	  "I217 LM Ethernet Connection",
   1418 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1420 	  "I218 V Ethernet Connection",
   1421 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1423 	  "I218 V Ethernet Connection",
   1424 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1426 	  "I218 V Ethernet Connection",
   1427 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1429 	  "I218 LM Ethernet Connection",
   1430 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1432 	  "I218 LM Ethernet Connection",
   1433 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1435 	  "I218 LM Ethernet Connection",
   1436 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1437 #if 0
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1439 	  "I219 V Ethernet Connection",
   1440 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1442 	  "I219 V Ethernet Connection",
   1443 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1445 	  "I219 V Ethernet Connection",
   1446 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1448 	  "I219 V Ethernet Connection",
   1449 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1451 	  "I219 LM Ethernet Connection",
   1452 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1454 	  "I219 LM Ethernet Connection",
   1455 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1457 	  "I219 LM Ethernet Connection",
   1458 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1460 	  "I219 LM Ethernet Connection",
   1461 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1463 	  "I219 LM Ethernet Connection",
   1464 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1465 #endif
   1466 	{ 0,			0,
   1467 	  NULL,
   1468 	  0,			0 },
   1469 };
   1470 
   1471 /*
   1472  * Register read/write functions.
   1473  * Other than CSR_{READ|WRITE}().
   1474  */
   1475 
   1476 #if 0 /* Not currently used */
   1477 static inline uint32_t
   1478 wm_io_read(struct wm_softc *sc, int reg)
   1479 {
   1480 
   1481 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1482 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1483 }
   1484 #endif
   1485 
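         /*
          * A note on the functions below (consistent with how they use the
          * I/O BAR): the BAR is a small indirection window.  A write to
          * offset 0 (the address register) selects a CSR by its
          * memory-space offset, and offset 4 (the data register) then
          * reads or writes the selected CSR.
          */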
   1486 static inline void
   1487 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1488 {
   1489 
   1490 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1491 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1492 }
   1493 
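         /*
          * Write an 8-bit value to an indirect 8-bit controller register:
          * the data byte and the target register offset are packed into a
          * single 32-bit write, and the READY bit is then polled (up to
          * SCTL_CTL_POLL_TIMEOUT iterations, 5us apart) to wait for the
          * hardware to latch the value.
          */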
   1494 static inline void
   1495 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1496     uint32_t data)
   1497 {
   1498 	uint32_t regval;
   1499 	int i;
   1500 
   1501 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1502 
   1503 	CSR_WRITE(sc, reg, regval);
   1504 
   1505 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1506 		delay(5);
   1507 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1508 			break;
   1509 	}
   1510 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1511 		aprint_error("%s: WARNING:"
   1512 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1513 		    device_xname(sc->sc_dev), reg);
   1514 	}
   1515 }
   1516 
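         /*
          * Store a DMA address into the two little-endian 32-bit words of
          * a descriptor address field.  On platforms with a 32-bit
          * bus_addr_t the sizeof() test is constant-folded and the high
          * word is simply zeroed.
          */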
   1517 static inline void
   1518 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1519 {
   1520 	wa->wa_low = htole32(v & 0xffffffffU);
   1521 	if (sizeof(bus_addr_t) == 8)
   1522 		wa->wa_high = htole32((uint64_t) v >> 32);
   1523 	else
   1524 		wa->wa_high = 0;
   1525 }
   1526 
   1527 /*
   1528  * Descriptor sync/init functions.
   1529  */
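         /*
          * wm_cdtxsync() syncs "num" descriptors starting at "start"; the
          * bus_dmamap_sync() is split in two when the range wraps the
          * ring.  For example (illustrative numbers only): on a 256-entry
          * ring, start = 250 and num = 10 sync entries 250..255 first and
          * then entries 0..3.
          */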
   1530 static inline void
   1531 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1532 {
   1533 	struct wm_softc *sc = txq->txq_sc;
   1534 
   1535 	/* If it will wrap around, sync to the end of the ring. */
   1536 	if ((start + num) > WM_NTXDESC(txq)) {
   1537 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1538 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1539 		    (WM_NTXDESC(txq) - start), ops);
   1540 		num -= (WM_NTXDESC(txq) - start);
   1541 		start = 0;
   1542 	}
   1543 
   1544 	/* Now sync whatever is left. */
   1545 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1546 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1547 }
   1548 
   1549 static inline void
   1550 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1551 {
   1552 	struct wm_softc *sc = rxq->rxq_sc;
   1553 
   1554 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1555 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1556 }
   1557 
   1558 static inline void
   1559 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1560 {
   1561 	struct wm_softc *sc = rxq->rxq_sc;
   1562 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1563 	struct mbuf *m = rxs->rxs_mbuf;
   1564 
   1565 	/*
   1566 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1567 	 * so that the payload after the Ethernet header is aligned
   1568 	 * to a 4-byte boundary.
    1569 	 *
   1570 	 * XXX BRAINDAMAGE ALERT!
   1571 	 * The stupid chip uses the same size for every buffer, which
   1572 	 * is set in the Receive Control register.  We are using the 2K
   1573 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1574 	 * reason, we can't "scoot" packets longer than the standard
   1575 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1576 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1577 	 * the upper layer copy the headers.
   1578 	 */
   1579 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1580 
   1581 	if (sc->sc_type == WM_T_82574) {
   1582 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1583 		rxd->erx_data.erxd_addr =
   1584 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1585 		rxd->erx_data.erxd_dd = 0;
   1586 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1587 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1588 
   1589 		rxd->nqrx_data.nrxd_paddr =
   1590 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1591 		/* Currently, split header is not supported. */
   1592 		rxd->nqrx_data.nrxd_haddr = 0;
   1593 	} else {
   1594 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1595 
   1596 		wm_set_dma_addr(&rxd->wrx_addr,
   1597 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1598 		rxd->wrx_len = 0;
   1599 		rxd->wrx_cksum = 0;
   1600 		rxd->wrx_status = 0;
   1601 		rxd->wrx_errors = 0;
   1602 		rxd->wrx_special = 0;
   1603 	}
   1604 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1605 
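         	/* Hand this descriptor to the chip by advancing the ring tail */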
   1606 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1607 }
   1608 
   1609 /*
   1610  * Device driver interface functions and commonly used functions.
   1611  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1612  */
   1613 
   1614 /* Lookup supported device table */
   1615 static const struct wm_product *
   1616 wm_lookup(const struct pci_attach_args *pa)
   1617 {
   1618 	const struct wm_product *wmp;
   1619 
   1620 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1621 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1622 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1623 			return wmp;
   1624 	}
   1625 	return NULL;
   1626 }
   1627 
   1628 /* The match function (ca_match) */
   1629 static int
   1630 wm_match(device_t parent, cfdata_t cf, void *aux)
   1631 {
   1632 	struct pci_attach_args *pa = aux;
   1633 
   1634 	if (wm_lookup(pa) != NULL)
   1635 		return 1;
   1636 
   1637 	return 0;
   1638 }
   1639 
   1640 /* The attach function (ca_attach) */
   1641 static void
   1642 wm_attach(device_t parent, device_t self, void *aux)
   1643 {
   1644 	struct wm_softc *sc = device_private(self);
   1645 	struct pci_attach_args *pa = aux;
   1646 	prop_dictionary_t dict;
   1647 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1648 	pci_chipset_tag_t pc = pa->pa_pc;
   1649 	int counts[PCI_INTR_TYPE_SIZE];
   1650 	pci_intr_type_t max_type;
   1651 	const char *eetype, *xname;
   1652 	bus_space_tag_t memt;
   1653 	bus_space_handle_t memh;
   1654 	bus_size_t memsize;
   1655 	int memh_valid;
   1656 	int i, error;
   1657 	const struct wm_product *wmp;
   1658 	prop_data_t ea;
   1659 	prop_number_t pn;
   1660 	uint8_t enaddr[ETHER_ADDR_LEN];
   1661 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1662 	pcireg_t preg, memtype;
   1663 	uint16_t eeprom_data, apme_mask;
   1664 	bool force_clear_smbi;
   1665 	uint32_t link_mode;
   1666 	uint32_t reg;
   1667 
   1668 	sc->sc_dev = self;
   1669 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1670 	sc->sc_core_stopping = false;
   1671 
   1672 	wmp = wm_lookup(pa);
   1673 #ifdef DIAGNOSTIC
   1674 	if (wmp == NULL) {
   1675 		printf("\n");
   1676 		panic("wm_attach: impossible");
   1677 	}
   1678 #endif
   1679 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1680 
   1681 	sc->sc_pc = pa->pa_pc;
   1682 	sc->sc_pcitag = pa->pa_tag;
   1683 
   1684 	if (pci_dma64_available(pa))
   1685 		sc->sc_dmat = pa->pa_dmat64;
   1686 	else
   1687 		sc->sc_dmat = pa->pa_dmat;
   1688 
   1689 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1690 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1691 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1692 
   1693 	sc->sc_type = wmp->wmp_type;
   1694 
   1695 	/* Set default function pointers */
   1696 	sc->phy.acquire = wm_get_null;
   1697 	sc->phy.release = wm_put_null;
   1698 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1699 
   1700 	if (sc->sc_type < WM_T_82543) {
   1701 		if (sc->sc_rev < 2) {
   1702 			aprint_error_dev(sc->sc_dev,
   1703 			    "i82542 must be at least rev. 2\n");
   1704 			return;
   1705 		}
   1706 		if (sc->sc_rev < 3)
   1707 			sc->sc_type = WM_T_82542_2_0;
   1708 	}
   1709 
   1710 	/*
   1711 	 * Disable MSI for Errata:
   1712 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1713 	 *
   1714 	 *  82544: Errata 25
   1715 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1716 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1717 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1718 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1719 	 *
   1720 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1721 	 *
   1722 	 *  82571 & 82572: Errata 63
   1723 	 */
   1724 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1725 	    || (sc->sc_type == WM_T_82572))
   1726 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1727 
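         	/*
         	 * The 82575 and newer families use the newer ("advanced")
         	 * descriptor formats and per-queue registers; flag them so the
         	 * rest of the driver picks the matching code paths.
         	 */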
   1728 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1729 	    || (sc->sc_type == WM_T_82580)
   1730 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1731 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1732 		sc->sc_flags |= WM_F_NEWQUEUE;
   1733 
   1734 	/* Set device properties (mactype) */
   1735 	dict = device_properties(sc->sc_dev);
   1736 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1737 
   1738 	/*
    1739 	 * Map the device.  All devices support memory-mapped access,
   1740 	 * and it is really required for normal operation.
   1741 	 */
   1742 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1743 	switch (memtype) {
   1744 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1745 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1746 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1747 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1748 		break;
   1749 	default:
   1750 		memh_valid = 0;
   1751 		break;
   1752 	}
   1753 
   1754 	if (memh_valid) {
   1755 		sc->sc_st = memt;
   1756 		sc->sc_sh = memh;
   1757 		sc->sc_ss = memsize;
   1758 	} else {
   1759 		aprint_error_dev(sc->sc_dev,
   1760 		    "unable to map device registers\n");
   1761 		return;
   1762 	}
   1763 
   1764 	/*
   1765 	 * In addition, i82544 and later support I/O mapped indirect
   1766 	 * register access.  It is not desirable (nor supported in
   1767 	 * this driver) to use it for normal operation, though it is
   1768 	 * required to work around bugs in some chip versions.
   1769 	 */
   1770 	if (sc->sc_type >= WM_T_82544) {
   1771 		/* First we have to find the I/O BAR. */
   1772 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1773 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1774 			if (memtype == PCI_MAPREG_TYPE_IO)
   1775 				break;
   1776 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1777 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1778 				i += 4;	/* skip high bits, too */
   1779 		}
   1780 		if (i < PCI_MAPREG_END) {
   1781 			/*
    1782 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1783 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1784 			 * That's no problem, because those newer chips don't
    1785 			 * have this bug anyway.
   1786 			 *
    1787 			 * The i8254x apparently doesn't respond when the
    1788 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1789 			 * been configured.
   1790 			 */
   1791 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1792 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1793 				aprint_error_dev(sc->sc_dev,
   1794 				    "WARNING: I/O BAR at zero.\n");
   1795 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1796 					0, &sc->sc_iot, &sc->sc_ioh,
   1797 					NULL, &sc->sc_ios) == 0) {
   1798 				sc->sc_flags |= WM_F_IOH_VALID;
   1799 			} else {
   1800 				aprint_error_dev(sc->sc_dev,
   1801 				    "WARNING: unable to map I/O space\n");
   1802 			}
   1803 		}
   1804 
   1805 	}
   1806 
   1807 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1808 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1809 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1810 	if (sc->sc_type < WM_T_82542_2_1)
   1811 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1812 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1813 
   1814 	/* power up chip */
   1815 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1816 	    NULL)) && error != EOPNOTSUPP) {
   1817 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1818 		return;
   1819 	}
   1820 
   1821 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1822 
   1823 	/* Allocation settings */
   1824 	max_type = PCI_INTR_TYPE_MSIX;
   1825 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1826 	counts[PCI_INTR_TYPE_MSI] = 1;
   1827 	counts[PCI_INTR_TYPE_INTX] = 1;
   1828 
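         	/*
         	 * The allocation below falls back in steps: try MSI-X first
         	 * (one vector per queue plus one extra, used by the driver for
         	 * link interrupts); if the vectors can't be set up, release
         	 * them and retry with MSI, and finally with a single INTx
         	 * line.
         	 */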
   1829 alloc_retry:
   1830 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1831 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1832 		return;
   1833 	}
   1834 
   1835 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1836 		error = wm_setup_msix(sc);
   1837 		if (error) {
   1838 			pci_intr_release(pc, sc->sc_intrs,
   1839 			    counts[PCI_INTR_TYPE_MSIX]);
   1840 
   1841 			/* Setup for MSI: Disable MSI-X */
   1842 			max_type = PCI_INTR_TYPE_MSI;
   1843 			counts[PCI_INTR_TYPE_MSI] = 1;
   1844 			counts[PCI_INTR_TYPE_INTX] = 1;
   1845 			goto alloc_retry;
   1846 		}
    1847 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1848 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1849 		error = wm_setup_legacy(sc);
   1850 		if (error) {
   1851 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1852 			    counts[PCI_INTR_TYPE_MSI]);
   1853 
   1854 			/* The next try is for INTx: Disable MSI */
   1855 			max_type = PCI_INTR_TYPE_INTX;
   1856 			counts[PCI_INTR_TYPE_INTX] = 1;
   1857 			goto alloc_retry;
   1858 		}
   1859 	} else {
   1860 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1861 		error = wm_setup_legacy(sc);
   1862 		if (error) {
   1863 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1864 			    counts[PCI_INTR_TYPE_INTX]);
   1865 			return;
   1866 		}
   1867 	}
   1868 
   1869 	/*
   1870 	 * Check the function ID (unit number of the chip).
   1871 	 */
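         	/*
         	 * Several NVM words (e.g. CFG3_PORTA/CFG3_PORTB, read below)
         	 * are stored per port on multi-port MACs, so remember which
         	 * port this function is.
         	 */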
   1872 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1873 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1874 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1875 	    || (sc->sc_type == WM_T_82580)
   1876 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1877 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1878 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1879 	else
   1880 		sc->sc_funcid = 0;
   1881 
   1882 	/*
   1883 	 * Determine a few things about the bus we're connected to.
   1884 	 */
   1885 	if (sc->sc_type < WM_T_82543) {
   1886 		/* We don't really know the bus characteristics here. */
   1887 		sc->sc_bus_speed = 33;
   1888 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1889 		/*
    1890 		 * CSA (Communication Streaming Architecture) is about as fast
    1891 		 * as a 32-bit, 66MHz PCI bus.
   1892 		 */
   1893 		sc->sc_flags |= WM_F_CSA;
   1894 		sc->sc_bus_speed = 66;
   1895 		aprint_verbose_dev(sc->sc_dev,
   1896 		    "Communication Streaming Architecture\n");
   1897 		if (sc->sc_type == WM_T_82547) {
   1898 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1899 			callout_setfunc(&sc->sc_txfifo_ch,
   1900 					wm_82547_txfifo_stall, sc);
   1901 			aprint_verbose_dev(sc->sc_dev,
   1902 			    "using 82547 Tx FIFO stall work-around\n");
   1903 		}
   1904 	} else if (sc->sc_type >= WM_T_82571) {
   1905 		sc->sc_flags |= WM_F_PCIE;
   1906 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1907 		    && (sc->sc_type != WM_T_ICH10)
   1908 		    && (sc->sc_type != WM_T_PCH)
   1909 		    && (sc->sc_type != WM_T_PCH2)
   1910 		    && (sc->sc_type != WM_T_PCH_LPT)
   1911 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1912 			/* ICH* and PCH* have no PCIe capability registers */
   1913 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1914 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1915 				NULL) == 0)
   1916 				aprint_error_dev(sc->sc_dev,
   1917 				    "unable to find PCIe capability\n");
   1918 		}
   1919 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1920 	} else {
   1921 		reg = CSR_READ(sc, WMREG_STATUS);
   1922 		if (reg & STATUS_BUS64)
   1923 			sc->sc_flags |= WM_F_BUS64;
   1924 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1925 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1926 
   1927 			sc->sc_flags |= WM_F_PCIX;
   1928 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1929 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1930 				aprint_error_dev(sc->sc_dev,
   1931 				    "unable to find PCIX capability\n");
   1932 			else if (sc->sc_type != WM_T_82545_3 &&
   1933 				 sc->sc_type != WM_T_82546_3) {
   1934 				/*
   1935 				 * Work around a problem caused by the BIOS
   1936 				 * setting the max memory read byte count
   1937 				 * incorrectly.
   1938 				 */
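         				/*
         				 * Both fields encode 512 << n bytes,
         				 * so clamp the commanded count to the
         				 * advertised maximum below.
         				 */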
   1939 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1940 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1941 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1942 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1943 
   1944 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1945 				    PCIX_CMD_BYTECNT_SHIFT;
   1946 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1947 				    PCIX_STATUS_MAXB_SHIFT;
   1948 				if (bytecnt > maxb) {
   1949 					aprint_verbose_dev(sc->sc_dev,
   1950 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1951 					    512 << bytecnt, 512 << maxb);
   1952 					pcix_cmd = (pcix_cmd &
   1953 					    ~PCIX_CMD_BYTECNT_MASK) |
   1954 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1955 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1956 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1957 					    pcix_cmd);
   1958 				}
   1959 			}
   1960 		}
   1961 		/*
   1962 		 * The quad port adapter is special; it has a PCIX-PCIX
   1963 		 * bridge on the board, and can run the secondary bus at
   1964 		 * a higher speed.
   1965 		 */
   1966 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1967 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1968 								      : 66;
   1969 		} else if (sc->sc_flags & WM_F_PCIX) {
   1970 			switch (reg & STATUS_PCIXSPD_MASK) {
   1971 			case STATUS_PCIXSPD_50_66:
   1972 				sc->sc_bus_speed = 66;
   1973 				break;
   1974 			case STATUS_PCIXSPD_66_100:
   1975 				sc->sc_bus_speed = 100;
   1976 				break;
   1977 			case STATUS_PCIXSPD_100_133:
   1978 				sc->sc_bus_speed = 133;
   1979 				break;
   1980 			default:
   1981 				aprint_error_dev(sc->sc_dev,
   1982 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1983 				    reg & STATUS_PCIXSPD_MASK);
   1984 				sc->sc_bus_speed = 66;
   1985 				break;
   1986 			}
   1987 		} else
   1988 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1989 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1990 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1991 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1992 	}
   1993 
   1994 	/* clear interesting stat counters */
   1995 	CSR_READ(sc, WMREG_COLC);
   1996 	CSR_READ(sc, WMREG_RXERRC);
   1997 
   1998 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1999 	    || (sc->sc_type >= WM_T_ICH8))
   2000 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2001 	if (sc->sc_type >= WM_T_ICH8)
   2002 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2003 
    2004 	/* Set the NVM type/size and the PHY/NVM semaphore functions */
   2005 	switch (sc->sc_type) {
   2006 	case WM_T_82542_2_0:
   2007 	case WM_T_82542_2_1:
   2008 	case WM_T_82543:
   2009 	case WM_T_82544:
   2010 		/* Microwire */
   2011 		sc->sc_nvm_wordsize = 64;
   2012 		sc->sc_nvm_addrbits = 6;
   2013 		break;
   2014 	case WM_T_82540:
   2015 	case WM_T_82545:
   2016 	case WM_T_82545_3:
   2017 	case WM_T_82546:
   2018 	case WM_T_82546_3:
   2019 		/* Microwire */
   2020 		reg = CSR_READ(sc, WMREG_EECD);
   2021 		if (reg & EECD_EE_SIZE) {
   2022 			sc->sc_nvm_wordsize = 256;
   2023 			sc->sc_nvm_addrbits = 8;
   2024 		} else {
   2025 			sc->sc_nvm_wordsize = 64;
   2026 			sc->sc_nvm_addrbits = 6;
   2027 		}
   2028 		sc->sc_flags |= WM_F_LOCK_EECD;
   2029 		break;
   2030 	case WM_T_82541:
   2031 	case WM_T_82541_2:
   2032 	case WM_T_82547:
   2033 	case WM_T_82547_2:
   2034 		sc->sc_flags |= WM_F_LOCK_EECD;
   2035 		reg = CSR_READ(sc, WMREG_EECD);
   2036 		if (reg & EECD_EE_TYPE) {
   2037 			/* SPI */
   2038 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2039 			wm_nvm_set_addrbits_size_eecd(sc);
   2040 		} else {
   2041 			/* Microwire */
   2042 			if ((reg & EECD_EE_ABITS) != 0) {
   2043 				sc->sc_nvm_wordsize = 256;
   2044 				sc->sc_nvm_addrbits = 8;
   2045 			} else {
   2046 				sc->sc_nvm_wordsize = 64;
   2047 				sc->sc_nvm_addrbits = 6;
   2048 			}
   2049 		}
   2050 		break;
   2051 	case WM_T_82571:
   2052 	case WM_T_82572:
   2053 		/* SPI */
   2054 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2055 		wm_nvm_set_addrbits_size_eecd(sc);
   2056 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2057 		sc->phy.acquire = wm_get_swsm_semaphore;
   2058 		sc->phy.release = wm_put_swsm_semaphore;
   2059 		break;
   2060 	case WM_T_82573:
   2061 	case WM_T_82574:
   2062 	case WM_T_82583:
   2063 		if (sc->sc_type == WM_T_82573) {
   2064 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2065 			sc->phy.acquire = wm_get_swsm_semaphore;
   2066 			sc->phy.release = wm_put_swsm_semaphore;
   2067 		} else {
   2068 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2069 			/* Both PHY and NVM use the same semaphore. */
   2070 			sc->phy.acquire
   2071 			    = wm_get_swfwhw_semaphore;
   2072 			sc->phy.release
   2073 			    = wm_put_swfwhw_semaphore;
   2074 		}
   2075 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2076 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2077 			sc->sc_nvm_wordsize = 2048;
   2078 		} else {
   2079 			/* SPI */
   2080 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2081 			wm_nvm_set_addrbits_size_eecd(sc);
   2082 		}
   2083 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2084 		break;
   2085 	case WM_T_82575:
   2086 	case WM_T_82576:
   2087 	case WM_T_82580:
   2088 	case WM_T_I350:
   2089 	case WM_T_I354:
   2090 	case WM_T_80003:
   2091 		/* SPI */
   2092 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2093 		wm_nvm_set_addrbits_size_eecd(sc);
   2094 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2095 		    | WM_F_LOCK_SWSM;
   2096 		sc->phy.acquire = wm_get_phy_82575;
   2097 		sc->phy.release = wm_put_phy_82575;
   2098 		break;
   2099 	case WM_T_ICH8:
   2100 	case WM_T_ICH9:
   2101 	case WM_T_ICH10:
   2102 	case WM_T_PCH:
   2103 	case WM_T_PCH2:
   2104 	case WM_T_PCH_LPT:
   2105 		/* FLASH */
   2106 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2107 		sc->sc_nvm_wordsize = 2048;
   2108 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2109 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2110 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2111 			aprint_error_dev(sc->sc_dev,
   2112 			    "can't map FLASH registers\n");
   2113 			goto out;
   2114 		}
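         		/*
         		 * GFPREG gives the first and last sectors of the
         		 * gigabit NVM region; turn those into a base address
         		 * and a per-bank size in 16-bit words (the region
         		 * holds two banks, two bytes per word).
         		 */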
   2115 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2116 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2117 		    ICH_FLASH_SECTOR_SIZE;
   2118 		sc->sc_ich8_flash_bank_size =
   2119 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2120 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2121 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2122 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2123 		sc->sc_flashreg_offset = 0;
   2124 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2125 		sc->phy.release = wm_put_swflag_ich8lan;
   2126 		break;
   2127 	case WM_T_PCH_SPT:
   2128 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2129 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2130 		sc->sc_flasht = sc->sc_st;
   2131 		sc->sc_flashh = sc->sc_sh;
   2132 		sc->sc_ich8_flash_base = 0;
   2133 		sc->sc_nvm_wordsize =
   2134 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2135 			* NVM_SIZE_MULTIPLIER;
   2136 		/* It is size in bytes, we want words */
    2137 		/* The size is in bytes; convert it to words */
   2138 		/* assume 2 banks */
   2139 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2140 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2141 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2142 		sc->phy.release = wm_put_swflag_ich8lan;
   2143 		break;
   2144 	case WM_T_I210:
   2145 	case WM_T_I211:
   2146 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2147 			wm_nvm_set_addrbits_size_eecd(sc);
   2148 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2149 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2150 		} else {
   2151 			sc->sc_nvm_wordsize = INVM_SIZE;
   2152 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2153 		}
   2154 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2155 		sc->phy.acquire = wm_get_phy_82575;
   2156 		sc->phy.release = wm_put_phy_82575;
   2157 		break;
   2158 	default:
   2159 		break;
   2160 	}
   2161 
   2162 	/* Reset the chip to a known state. */
   2163 	wm_reset(sc);
   2164 
   2165 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2166 	switch (sc->sc_type) {
   2167 	case WM_T_82571:
   2168 	case WM_T_82572:
   2169 		reg = CSR_READ(sc, WMREG_SWSM2);
   2170 		if ((reg & SWSM2_LOCK) == 0) {
   2171 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2172 			force_clear_smbi = true;
   2173 		} else
   2174 			force_clear_smbi = false;
   2175 		break;
   2176 	case WM_T_82573:
   2177 	case WM_T_82574:
   2178 	case WM_T_82583:
   2179 		force_clear_smbi = true;
   2180 		break;
   2181 	default:
   2182 		force_clear_smbi = false;
   2183 		break;
   2184 	}
   2185 	if (force_clear_smbi) {
   2186 		reg = CSR_READ(sc, WMREG_SWSM);
   2187 		if ((reg & SWSM_SMBI) != 0)
   2188 			aprint_error_dev(sc->sc_dev,
   2189 			    "Please update the Bootagent\n");
   2190 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2191 	}
   2192 
   2193 	/*
    2194 	 * Defer printing the EEPROM type until after verifying the checksum.
   2195 	 * This allows the EEPROM type to be printed correctly in the case
   2196 	 * that no EEPROM is attached.
   2197 	 */
   2198 	/*
   2199 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2200 	 * this for later, so we can fail future reads from the EEPROM.
   2201 	 */
   2202 	if (wm_nvm_validate_checksum(sc)) {
   2203 		/*
    2204 		 * Check a second time, because some PCI-e parts fail the
    2205 		 * first check due to the link being in a sleep state.
   2206 		 */
   2207 		if (wm_nvm_validate_checksum(sc))
   2208 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2209 	}
   2210 
   2211 	/* Set device properties (macflags) */
   2212 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2213 
   2214 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2215 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2216 	else {
   2217 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2218 		    sc->sc_nvm_wordsize);
   2219 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2220 			aprint_verbose("iNVM");
   2221 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2222 			aprint_verbose("FLASH(HW)");
   2223 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2224 			aprint_verbose("FLASH");
   2225 		else {
   2226 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2227 				eetype = "SPI";
   2228 			else
   2229 				eetype = "MicroWire";
   2230 			aprint_verbose("(%d address bits) %s EEPROM",
   2231 			    sc->sc_nvm_addrbits, eetype);
   2232 		}
   2233 	}
   2234 	wm_nvm_version(sc);
   2235 	aprint_verbose("\n");
   2236 
   2237 	/* Check for I21[01] PLL workaround */
   2238 	if (sc->sc_type == WM_T_I210)
   2239 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2240 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2241 		/* NVM image release 3.25 has a workaround */
   2242 		if ((sc->sc_nvm_ver_major < 3)
   2243 		    || ((sc->sc_nvm_ver_major == 3)
   2244 			&& (sc->sc_nvm_ver_minor < 25))) {
   2245 			aprint_verbose_dev(sc->sc_dev,
   2246 			    "ROM image version %d.%d is older than 3.25\n",
   2247 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2248 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2249 		}
   2250 	}
   2251 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2252 		wm_pll_workaround_i210(sc);
   2253 
   2254 	wm_get_wakeup(sc);
   2255 
   2256 	/* Non-AMT based hardware can now take control from firmware */
   2257 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2258 		wm_get_hw_control(sc);
   2259 
   2260 	/*
   2261 	 * Read the Ethernet address from the EEPROM, if not first found
   2262 	 * in device properties.
   2263 	 */
   2264 	ea = prop_dictionary_get(dict, "mac-address");
   2265 	if (ea != NULL) {
   2266 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2267 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2268 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2269 	} else {
   2270 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2271 			aprint_error_dev(sc->sc_dev,
   2272 			    "unable to read Ethernet address\n");
   2273 			goto out;
   2274 		}
   2275 	}
   2276 
   2277 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2278 	    ether_sprintf(enaddr));
   2279 
   2280 	/*
   2281 	 * Read the config info from the EEPROM, and set up various
   2282 	 * bits in the control registers based on their contents.
   2283 	 */
   2284 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2285 	if (pn != NULL) {
   2286 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2287 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2288 	} else {
   2289 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2290 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2291 			goto out;
   2292 		}
   2293 	}
   2294 
   2295 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2296 	if (pn != NULL) {
   2297 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2298 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2299 	} else {
   2300 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2301 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2302 			goto out;
   2303 		}
   2304 	}
   2305 
   2306 	/* check for WM_F_WOL */
   2307 	switch (sc->sc_type) {
   2308 	case WM_T_82542_2_0:
   2309 	case WM_T_82542_2_1:
   2310 	case WM_T_82543:
   2311 		/* dummy? */
   2312 		eeprom_data = 0;
   2313 		apme_mask = NVM_CFG3_APME;
   2314 		break;
   2315 	case WM_T_82544:
   2316 		apme_mask = NVM_CFG2_82544_APM_EN;
   2317 		eeprom_data = cfg2;
   2318 		break;
   2319 	case WM_T_82546:
   2320 	case WM_T_82546_3:
   2321 	case WM_T_82571:
   2322 	case WM_T_82572:
   2323 	case WM_T_82573:
   2324 	case WM_T_82574:
   2325 	case WM_T_82583:
   2326 	case WM_T_80003:
   2327 	default:
   2328 		apme_mask = NVM_CFG3_APME;
   2329 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2330 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2331 		break;
   2332 	case WM_T_82575:
   2333 	case WM_T_82576:
   2334 	case WM_T_82580:
   2335 	case WM_T_I350:
   2336 	case WM_T_I354: /* XXX ok? */
   2337 	case WM_T_ICH8:
   2338 	case WM_T_ICH9:
   2339 	case WM_T_ICH10:
   2340 	case WM_T_PCH:
   2341 	case WM_T_PCH2:
   2342 	case WM_T_PCH_LPT:
   2343 	case WM_T_PCH_SPT:
   2344 		/* XXX The funcid should be checked on some devices */
   2345 		apme_mask = WUC_APME;
   2346 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2347 		break;
   2348 	}
   2349 
   2350 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2351 	if ((eeprom_data & apme_mask) != 0)
   2352 		sc->sc_flags |= WM_F_WOL;
   2353 #ifdef WM_DEBUG
   2354 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2355 		printf("WOL\n");
   2356 #endif
   2357 
   2358 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2359 		/* Check NVM for autonegotiation */
   2360 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2361 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2362 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2363 		}
   2364 	}
   2365 
   2366 	/*
    2367 	 * XXX need special handling for some multi-port cards
    2368 	 * to disable a particular port.
   2369 	 */
   2370 
   2371 	if (sc->sc_type >= WM_T_82544) {
   2372 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2373 		if (pn != NULL) {
   2374 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2375 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2376 		} else {
   2377 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2378 				aprint_error_dev(sc->sc_dev,
   2379 				    "unable to read SWDPIN\n");
   2380 				goto out;
   2381 			}
   2382 		}
   2383 	}
   2384 
   2385 	if (cfg1 & NVM_CFG1_ILOS)
   2386 		sc->sc_ctrl |= CTRL_ILOS;
   2387 
   2388 	/*
   2389 	 * XXX
    2390 	 * This code isn't correct because pins 2 and 3 are located
    2391 	 * at different positions on newer chips. Check all datasheets.
    2392 	 *
    2393 	 * Until this problem is resolved, only do this on chips <= 82580.
   2394 	 */
   2395 	if (sc->sc_type <= WM_T_82580) {
   2396 		if (sc->sc_type >= WM_T_82544) {
   2397 			sc->sc_ctrl |=
   2398 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2399 			    CTRL_SWDPIO_SHIFT;
   2400 			sc->sc_ctrl |=
   2401 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2402 			    CTRL_SWDPINS_SHIFT;
   2403 		} else {
   2404 			sc->sc_ctrl |=
   2405 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2406 			    CTRL_SWDPIO_SHIFT;
   2407 		}
   2408 	}
   2409 
   2410 	/* XXX For other than 82580? */
   2411 	if (sc->sc_type == WM_T_82580) {
   2412 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2413 		if (nvmword & __BIT(13))
   2414 			sc->sc_ctrl |= CTRL_ILOS;
   2415 	}
   2416 
   2417 #if 0
   2418 	if (sc->sc_type >= WM_T_82544) {
   2419 		if (cfg1 & NVM_CFG1_IPS0)
   2420 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2421 		if (cfg1 & NVM_CFG1_IPS1)
   2422 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2423 		sc->sc_ctrl_ext |=
   2424 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2425 		    CTRL_EXT_SWDPIO_SHIFT;
   2426 		sc->sc_ctrl_ext |=
   2427 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2428 		    CTRL_EXT_SWDPINS_SHIFT;
   2429 	} else {
   2430 		sc->sc_ctrl_ext |=
   2431 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2432 		    CTRL_EXT_SWDPIO_SHIFT;
   2433 	}
   2434 #endif
   2435 
   2436 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2437 #if 0
   2438 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2439 #endif
   2440 
   2441 	if (sc->sc_type == WM_T_PCH) {
   2442 		uint16_t val;
   2443 
   2444 		/* Save the NVM K1 bit setting */
   2445 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2446 
   2447 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2448 			sc->sc_nvm_k1_enabled = 1;
   2449 		else
   2450 			sc->sc_nvm_k1_enabled = 0;
   2451 	}
   2452 
   2453 	/*
    2454 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2455 	 * media structures accordingly.
   2456 	 */
   2457 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2458 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2459 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2460 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2461 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2462 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2463 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2464 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2465 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2466 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2467 	    || (sc->sc_type == WM_T_I211)) {
   2468 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2469 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2470 		switch (link_mode) {
   2471 		case CTRL_EXT_LINK_MODE_1000KX:
   2472 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2473 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2474 			break;
   2475 		case CTRL_EXT_LINK_MODE_SGMII:
   2476 			if (wm_sgmii_uses_mdio(sc)) {
   2477 				aprint_verbose_dev(sc->sc_dev,
   2478 				    "SGMII(MDIO)\n");
   2479 				sc->sc_flags |= WM_F_SGMII;
   2480 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2481 				break;
   2482 			}
   2483 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2484 			/*FALLTHROUGH*/
   2485 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2486 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2487 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2488 				if (link_mode
   2489 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2490 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2491 					sc->sc_flags |= WM_F_SGMII;
   2492 				} else {
   2493 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2494 					aprint_verbose_dev(sc->sc_dev,
   2495 					    "SERDES\n");
   2496 				}
   2497 				break;
   2498 			}
   2499 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2500 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2501 
   2502 			/* Change current link mode setting */
   2503 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2504 			switch (sc->sc_mediatype) {
   2505 			case WM_MEDIATYPE_COPPER:
   2506 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2507 				break;
   2508 			case WM_MEDIATYPE_SERDES:
   2509 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2510 				break;
   2511 			default:
   2512 				break;
   2513 			}
   2514 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2515 			break;
   2516 		case CTRL_EXT_LINK_MODE_GMII:
   2517 		default:
   2518 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2519 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2520 			break;
   2521 		}
   2522 
    2523 		/* Enable the I2C interface pins only when SGMII is in use */
    2524 		reg &= ~CTRL_EXT_I2C_ENA;
    2525 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2526 			reg |= CTRL_EXT_I2C_ENA;
   2528 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2529 
   2530 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2531 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2532 		else
   2533 			wm_tbi_mediainit(sc);
   2534 	} else if (sc->sc_type < WM_T_82543 ||
   2535 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2536 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2537 			aprint_error_dev(sc->sc_dev,
   2538 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2539 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2540 		}
   2541 		wm_tbi_mediainit(sc);
   2542 	} else {
   2543 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2544 			aprint_error_dev(sc->sc_dev,
   2545 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2546 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2547 		}
   2548 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2549 	}
   2550 
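         	/*
         	 * Set up the ifnet: entry points and queueing strategy, using
         	 * the multiqueue if_transmit path on chips with more than one
         	 * queue.
         	 */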
   2551 	ifp = &sc->sc_ethercom.ec_if;
   2552 	xname = device_xname(sc->sc_dev);
   2553 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2554 	ifp->if_softc = sc;
   2555 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2556 #ifdef WM_MPSAFE
   2557 	ifp->if_extflags = IFEF_START_MPSAFE;
   2558 #endif
   2559 	ifp->if_ioctl = wm_ioctl;
   2560 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2561 		ifp->if_start = wm_nq_start;
   2562 		if (sc->sc_nqueues > 1)
   2563 			ifp->if_transmit = wm_nq_transmit;
   2564 	} else {
   2565 		ifp->if_start = wm_start;
   2566 		if (sc->sc_nqueues > 1)
   2567 			ifp->if_transmit = wm_transmit;
   2568 	}
   2569 	ifp->if_watchdog = wm_watchdog;
   2570 	ifp->if_init = wm_init;
   2571 	ifp->if_stop = wm_stop;
   2572 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2573 	IFQ_SET_READY(&ifp->if_snd);
   2574 
   2575 	/* Check for jumbo frame */
   2576 	switch (sc->sc_type) {
   2577 	case WM_T_82573:
   2578 		/* XXX limited to 9234 if ASPM is disabled */
   2579 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2580 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2581 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2582 		break;
   2583 	case WM_T_82571:
   2584 	case WM_T_82572:
   2585 	case WM_T_82574:
   2586 	case WM_T_82575:
   2587 	case WM_T_82576:
   2588 	case WM_T_82580:
   2589 	case WM_T_I350:
   2590 	case WM_T_I354: /* XXXX ok? */
   2591 	case WM_T_I210:
   2592 	case WM_T_I211:
   2593 	case WM_T_80003:
   2594 	case WM_T_ICH9:
   2595 	case WM_T_ICH10:
   2596 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2597 	case WM_T_PCH_LPT:
   2598 	case WM_T_PCH_SPT:
   2599 		/* XXX limited to 9234 */
   2600 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2601 		break;
   2602 	case WM_T_PCH:
   2603 		/* XXX limited to 4096 */
   2604 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2605 		break;
   2606 	case WM_T_82542_2_0:
   2607 	case WM_T_82542_2_1:
   2608 	case WM_T_82583:
   2609 	case WM_T_ICH8:
   2610 		/* No support for jumbo frame */
   2611 		break;
   2612 	default:
   2613 		/* ETHER_MAX_LEN_JUMBO */
   2614 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2615 		break;
   2616 	}
   2617 
   2618 	/* If we're a i82543 or greater, we can support VLANs. */
   2619 	if (sc->sc_type >= WM_T_82543)
   2620 		sc->sc_ethercom.ec_capabilities |=
   2621 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2622 
   2623 	/*
    2624 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2625 	 * on i82543 and later.
   2626 	 */
   2627 	if (sc->sc_type >= WM_T_82543) {
   2628 		ifp->if_capabilities |=
   2629 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2630 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2631 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2632 		    IFCAP_CSUM_TCPv6_Tx |
   2633 		    IFCAP_CSUM_UDPv6_Tx;
   2634 	}
   2635 
   2636 	/*
   2637 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2638 	 *
   2639 	 *	82541GI (8086:1076) ... no
   2640 	 *	82572EI (8086:10b9) ... yes
   2641 	 */
   2642 	if (sc->sc_type >= WM_T_82571) {
   2643 		ifp->if_capabilities |=
   2644 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2645 	}
   2646 
   2647 	/*
   2648 	 * If we're a i82544 or greater (except i82547), we can do
   2649 	 * TCP segmentation offload.
   2650 	 */
   2651 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2652 		ifp->if_capabilities |= IFCAP_TSOv4;
   2653 	}
   2654 
   2655 	if (sc->sc_type >= WM_T_82571) {
   2656 		ifp->if_capabilities |= IFCAP_TSOv6;
   2657 	}
   2658 
   2659 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2660 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2661 
   2662 #ifdef WM_MPSAFE
   2663 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2664 #else
   2665 	sc->sc_core_lock = NULL;
   2666 #endif
   2667 
   2668 	/* Attach the interface. */
   2669 	if_initialize(ifp);
   2670 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2671 	ether_ifattach(ifp, enaddr);
   2672 	if_register(ifp);
   2673 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2674 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2675 			  RND_FLAG_DEFAULT);
   2676 
   2677 #ifdef WM_EVENT_COUNTERS
   2678 	/* Attach event counters. */
   2679 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2680 	    NULL, xname, "linkintr");
   2681 
   2682 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2683 	    NULL, xname, "tx_xoff");
   2684 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2685 	    NULL, xname, "tx_xon");
   2686 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2687 	    NULL, xname, "rx_xoff");
   2688 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2689 	    NULL, xname, "rx_xon");
   2690 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2691 	    NULL, xname, "rx_macctl");
   2692 #endif /* WM_EVENT_COUNTERS */
   2693 
   2694 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2695 		pmf_class_network_register(self, ifp);
   2696 	else
   2697 		aprint_error_dev(self, "couldn't establish power handler\n");
   2698 
   2699 	sc->sc_flags |= WM_F_ATTACHED;
   2700  out:
   2701 	return;
   2702 }
   2703 
   2704 /* The detach function (ca_detach) */
   2705 static int
   2706 wm_detach(device_t self, int flags __unused)
   2707 {
   2708 	struct wm_softc *sc = device_private(self);
   2709 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2710 	int i;
   2711 
   2712 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2713 		return 0;
   2714 
   2715 	/* Stop the interface. Callouts are stopped in it. */
   2716 	wm_stop(ifp, 1);
   2717 
   2718 	pmf_device_deregister(self);
   2719 
   2720 #ifdef WM_EVENT_COUNTERS
   2721 	evcnt_detach(&sc->sc_ev_linkintr);
   2722 
   2723 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2724 	evcnt_detach(&sc->sc_ev_tx_xon);
   2725 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2726 	evcnt_detach(&sc->sc_ev_rx_xon);
   2727 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2728 #endif /* WM_EVENT_COUNTERS */
   2729 
   2730 	/* Tell the firmware about the release */
   2731 	WM_CORE_LOCK(sc);
   2732 	wm_release_manageability(sc);
   2733 	wm_release_hw_control(sc);
   2734 	wm_enable_wakeup(sc);
   2735 	WM_CORE_UNLOCK(sc);
   2736 
   2737 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2738 
   2739 	/* Delete all remaining media. */
   2740 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2741 
   2742 	ether_ifdetach(ifp);
   2743 	if_detach(ifp);
   2744 	if_percpuq_destroy(sc->sc_ipq);
   2745 
   2746 	/* Unload RX dmamaps and free mbufs */
   2747 	for (i = 0; i < sc->sc_nqueues; i++) {
   2748 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2749 		mutex_enter(rxq->rxq_lock);
   2750 		wm_rxdrain(rxq);
   2751 		mutex_exit(rxq->rxq_lock);
   2752 	}
   2753 	/* Must unlock here */
   2754 
   2755 	/* Disestablish the interrupt handler */
   2756 	for (i = 0; i < sc->sc_nintrs; i++) {
   2757 		if (sc->sc_ihs[i] != NULL) {
   2758 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2759 			sc->sc_ihs[i] = NULL;
   2760 		}
   2761 	}
   2762 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2763 
   2764 	wm_free_txrx_queues(sc);
   2765 
   2766 	/* Unmap the registers */
   2767 	if (sc->sc_ss) {
   2768 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2769 		sc->sc_ss = 0;
   2770 	}
   2771 	if (sc->sc_ios) {
   2772 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2773 		sc->sc_ios = 0;
   2774 	}
   2775 	if (sc->sc_flashs) {
   2776 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2777 		sc->sc_flashs = 0;
   2778 	}
   2779 
   2780 	if (sc->sc_core_lock)
   2781 		mutex_obj_free(sc->sc_core_lock);
   2782 	if (sc->sc_ich_phymtx)
   2783 		mutex_obj_free(sc->sc_ich_phymtx);
   2784 	if (sc->sc_ich_nvmmtx)
   2785 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2786 
   2787 	return 0;
   2788 }
   2789 
   2790 static bool
   2791 wm_suspend(device_t self, const pmf_qual_t *qual)
   2792 {
   2793 	struct wm_softc *sc = device_private(self);
   2794 
   2795 	wm_release_manageability(sc);
   2796 	wm_release_hw_control(sc);
   2797 	wm_enable_wakeup(sc);
   2798 
   2799 	return true;
   2800 }
   2801 
   2802 static bool
   2803 wm_resume(device_t self, const pmf_qual_t *qual)
   2804 {
   2805 	struct wm_softc *sc = device_private(self);
   2806 
   2807 	wm_init_manageability(sc);
   2808 
   2809 	return true;
   2810 }
   2811 
   2812 /*
   2813  * wm_watchdog:		[ifnet interface function]
   2814  *
   2815  *	Watchdog timer handler.
   2816  */
   2817 static void
   2818 wm_watchdog(struct ifnet *ifp)
   2819 {
   2820 	int qid;
   2821 	struct wm_softc *sc = ifp->if_softc;
   2822 
   2823 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2824 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2825 
   2826 		wm_watchdog_txq(ifp, txq);
   2827 	}
   2828 
   2829 	/* Reset the interface. */
   2830 	(void) wm_init(ifp);
   2831 
   2832 	/*
   2833 	 * There is still some upper-layer processing that calls
   2834 	 * ifp->if_start(), e.g. ALTQ.
   2835 	 */
   2836 	/* Try to get more packets going. */
   2837 	ifp->if_start(ifp);
   2838 }
   2839 
   2840 static void
   2841 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2842 {
   2843 	struct wm_softc *sc = ifp->if_softc;
   2844 
   2845 	/*
   2846 	 * Since we're using delayed interrupts, sweep up
   2847 	 * before we report an error.
   2848 	 */
   2849 	mutex_enter(txq->txq_lock);
   2850 	wm_txeof(sc, txq);
   2851 	mutex_exit(txq->txq_lock);
   2852 
   2853 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2854 #ifdef WM_DEBUG
   2855 		int i, j;
   2856 		struct wm_txsoft *txs;
   2857 #endif
   2858 		log(LOG_ERR,
   2859 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2860 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2861 		    txq->txq_next);
   2862 		ifp->if_oerrors++;
   2863 #ifdef WM_DEBUG
   2864 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2865 		    i = WM_NEXTTXS(txq, i)) {
   2866 		    txs = &txq->txq_soft[i];
   2867 		    printf("txs %d tx %d -> %d\n",
   2868 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2869 		    for (j = txs->txs_firstdesc; ;
   2870 			j = WM_NEXTTX(txq, j)) {
   2871 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2872 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2873 			printf("\t %#08x%08x\n",
   2874 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2875 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2876 			if (j == txs->txs_lastdesc)
   2877 				break;
   2878 			}
   2879 		}
   2880 #endif
   2881 	}
   2882 }
   2883 
   2884 /*
   2885  * wm_tick:
   2886  *
   2887  *	One second timer, used to check link status, sweep up
   2888  *	completed transmit jobs, etc.
   2889  */
   2890 static void
   2891 wm_tick(void *arg)
   2892 {
   2893 	struct wm_softc *sc = arg;
   2894 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2895 #ifndef WM_MPSAFE
   2896 	int s = splnet();
   2897 #endif
   2898 
   2899 	WM_CORE_LOCK(sc);
   2900 
   2901 	if (sc->sc_core_stopping)
   2902 		goto out;
   2903 
   2904 	if (sc->sc_type >= WM_T_82542_2_1) {
   2905 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2906 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2907 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2908 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2909 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2910 	}
   2911 
   2912 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2913 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2914 	    + CSR_READ(sc, WMREG_CRCERRS)
   2915 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2916 	    + CSR_READ(sc, WMREG_SYMERRC)
   2917 	    + CSR_READ(sc, WMREG_RXERRC)
   2918 	    + CSR_READ(sc, WMREG_SEC)
   2919 	    + CSR_READ(sc, WMREG_CEXTERR)
   2920 	    + CSR_READ(sc, WMREG_RLEC);
   2921 	/*
   2922 	 * WMREG_RNBC is incremented when there are no available buffers in
   2923 	 * host memory. It is not the number of dropped packets, because the
   2924 	 * ethernet controller can still receive packets in that case as long
   2925 	 * as there is space in the PHY's FIFO.
   2926 	 *
   2927 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT instead of
   2928 	 * if_iqdrops.
   2929 	 */
   2930 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2931 
   2932 	if (sc->sc_flags & WM_F_HAS_MII)
   2933 		mii_tick(&sc->sc_mii);
   2934 	else if ((sc->sc_type >= WM_T_82575)
   2935 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2936 		wm_serdes_tick(sc);
   2937 	else
   2938 		wm_tbi_tick(sc);
   2939 
   2940 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2941 out:
   2942 	WM_CORE_UNLOCK(sc);
   2943 #ifndef WM_MPSAFE
   2944 	splx(s);
   2945 #endif
   2946 }
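
/*
 * Note on the counter reads above (an assumption based on how the values
 * are accumulated, not a datasheet quote): the statistics registers
 * (XONRXC, COLC, CRCERRS, ...) behave as clear-on-read, which is why
 * wm_tick() adds each raw register value once per second instead of
 * computing deltas against a saved copy.
 */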
   2947 
   2948 static int
   2949 wm_ifflags_cb(struct ethercom *ec)
   2950 {
   2951 	struct ifnet *ifp = &ec->ec_if;
   2952 	struct wm_softc *sc = ifp->if_softc;
   2953 	int rc = 0;
   2954 
   2955 	WM_CORE_LOCK(sc);
   2956 
   2957 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2958 	sc->sc_if_flags = ifp->if_flags;
   2959 
   2960 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2961 		rc = ENETRESET;
   2962 		goto out;
   2963 	}
   2964 
   2965 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2966 		wm_set_filter(sc);
   2967 
   2968 	wm_set_vlan(sc);
   2969 
   2970 out:
   2971 	WM_CORE_UNLOCK(sc);
   2972 
   2973 	return rc;
   2974 }
   2975 
   2976 /*
   2977  * wm_ioctl:		[ifnet interface function]
   2978  *
   2979  *	Handle control requests from the operator.
   2980  */
   2981 static int
   2982 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2983 {
   2984 	struct wm_softc *sc = ifp->if_softc;
   2985 	struct ifreq *ifr = (struct ifreq *) data;
   2986 	struct ifaddr *ifa = (struct ifaddr *)data;
   2987 	struct sockaddr_dl *sdl;
   2988 	int s, error;
   2989 
   2990 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2991 		device_xname(sc->sc_dev), __func__));
   2992 
   2993 #ifndef WM_MPSAFE
   2994 	s = splnet();
   2995 #endif
   2996 	switch (cmd) {
   2997 	case SIOCSIFMEDIA:
   2998 	case SIOCGIFMEDIA:
   2999 		WM_CORE_LOCK(sc);
   3000 		/* Flow control requires full-duplex mode. */
   3001 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3002 		    (ifr->ifr_media & IFM_FDX) == 0)
   3003 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3004 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3005 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3006 				/* We can do both TXPAUSE and RXPAUSE. */
   3007 				ifr->ifr_media |=
   3008 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3009 			}
   3010 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3011 		}
   3012 		WM_CORE_UNLOCK(sc);
   3013 #ifdef WM_MPSAFE
   3014 		s = splnet();
   3015 #endif
   3016 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3017 #ifdef WM_MPSAFE
   3018 		splx(s);
   3019 #endif
   3020 		break;
   3021 	case SIOCINITIFADDR:
   3022 		WM_CORE_LOCK(sc);
   3023 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3024 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3025 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3026 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3027 			/* The unicast address is the first filter entry */
   3028 			wm_set_filter(sc);
   3029 			error = 0;
   3030 			WM_CORE_UNLOCK(sc);
   3031 			break;
   3032 		}
   3033 		WM_CORE_UNLOCK(sc);
   3034 		/*FALLTHROUGH*/
   3035 	default:
   3036 #ifdef WM_MPSAFE
   3037 		s = splnet();
   3038 #endif
   3039 		/* It may call wm_start, so unlock here */
   3040 		error = ether_ioctl(ifp, cmd, data);
   3041 #ifdef WM_MPSAFE
   3042 		splx(s);
   3043 #endif
   3044 		if (error != ENETRESET)
   3045 			break;
   3046 
   3047 		error = 0;
   3048 
   3049 		if (cmd == SIOCSIFCAP) {
   3050 			error = (*ifp->if_init)(ifp);
   3051 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3052 			;
   3053 		else if (ifp->if_flags & IFF_RUNNING) {
   3054 			/*
   3055 			 * Multicast list has changed; set the hardware filter
   3056 			 * accordingly.
   3057 			 */
   3058 			WM_CORE_LOCK(sc);
   3059 			wm_set_filter(sc);
   3060 			WM_CORE_UNLOCK(sc);
   3061 		}
   3062 		break;
   3063 	}
   3064 
   3065 #ifndef WM_MPSAFE
   3066 	splx(s);
   3067 #endif
   3068 	return error;
   3069 }
   3070 
   3071 /* MAC address related */
   3072 
   3073 /*
   3074  * Get the offset of the MAC address and return it.
   3075  * If an error occurs, use offset 0.
   3076  */
   3077 static uint16_t
   3078 wm_check_alt_mac_addr(struct wm_softc *sc)
   3079 {
   3080 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3081 	uint16_t offset = NVM_OFF_MACADDR;
   3082 
   3083 	/* Try to read alternative MAC address pointer */
   3084 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3085 		return 0;
   3086 
   3087 	/* Check pointer if it's valid or not. */
   3088 	if ((offset == 0x0000) || (offset == 0xffff))
   3089 		return 0;
   3090 
   3091 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3092 	/*
   3093 	 * Check whether the alternative MAC address is valid or not.
   3094 	 * Some cards have a non-0xffff pointer but don't actually use
   3095 	 * an alternative MAC address.
   3096 	 *
   3097 	 * The test below checks whether the broadcast bit is set or not.
   3098 	 */
   3099 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3100 		if (((myea[0] & 0xff) & 0x01) == 0)
   3101 			return offset; /* Found */
   3102 
   3103 	/* Not found */
   3104 	return 0;
   3105 }
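
/*
 * Illustration of the check above (hypothetical values): with
 * sc_funcid == 1 and an ALT_MAC_ADDR_PTR word of 0x0037, the address
 * words are read starting at 0x0037 + NVM_OFF_MACADDR_82571(1).  The
 * ((myea[0] & 0xff) & 0x01) test rejects entries whose first octet has
 * the group bit set, since a station address must be unicast.
 */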
   3106 
   3107 static int
   3108 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3109 {
   3110 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3111 	uint16_t offset = NVM_OFF_MACADDR;
   3112 	int do_invert = 0;
   3113 
   3114 	switch (sc->sc_type) {
   3115 	case WM_T_82580:
   3116 	case WM_T_I350:
   3117 	case WM_T_I354:
   3118 		/* EEPROM Top Level Partitioning */
   3119 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3120 		break;
   3121 	case WM_T_82571:
   3122 	case WM_T_82575:
   3123 	case WM_T_82576:
   3124 	case WM_T_80003:
   3125 	case WM_T_I210:
   3126 	case WM_T_I211:
   3127 		offset = wm_check_alt_mac_addr(sc);
   3128 		if (offset == 0)
   3129 			if ((sc->sc_funcid & 0x01) == 1)
   3130 				do_invert = 1;
   3131 		break;
   3132 	default:
   3133 		if ((sc->sc_funcid & 0x01) == 1)
   3134 			do_invert = 1;
   3135 		break;
   3136 	}
   3137 
   3138 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3139 		goto bad;
   3140 
   3141 	enaddr[0] = myea[0] & 0xff;
   3142 	enaddr[1] = myea[0] >> 8;
   3143 	enaddr[2] = myea[1] & 0xff;
   3144 	enaddr[3] = myea[1] >> 8;
   3145 	enaddr[4] = myea[2] & 0xff;
   3146 	enaddr[5] = myea[2] >> 8;
   3147 
   3148 	/*
   3149 	 * Toggle the LSB of the MAC address on the second port
   3150 	 * of some dual port cards.
   3151 	 */
   3152 	if (do_invert != 0)
   3153 		enaddr[5] ^= 1;
   3154 
   3155 	return 0;
   3156 
   3157  bad:
   3158 	return -1;
   3159 }
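
/*
 * Worked example of the unpacking above (illustrative values only):
 * NVM words myea[] = { 0x1100, 0x3322, 0x5544 } yield the address
 * 00:11:22:33:44:55, i.e. each 16-bit word holds two octets in
 * little-endian order.  On the second port of an affected dual port
 * card, the final XOR flips the last bit: 00:11:22:33:44:54.
 */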
   3160 
   3161 /*
   3162  * wm_set_ral:
   3163  *
   3164  *	Set an entry in the receive address list.
   3165  */
   3166 static void
   3167 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3168 {
   3169 	uint32_t ral_lo, ral_hi;
   3170 
   3171 	if (enaddr != NULL) {
   3172 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3173 		    (enaddr[3] << 24);
   3174 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3175 		ral_hi |= RAL_AV;
   3176 	} else {
   3177 		ral_lo = 0;
   3178 		ral_hi = 0;
   3179 	}
   3180 
   3181 	if (sc->sc_type >= WM_T_82544) {
   3182 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3183 		    ral_lo);
   3184 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3185 		    ral_hi);
   3186 	} else {
   3187 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3188 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3189 	}
   3190 }
   3191 
   3192 /*
   3193  * wm_mchash:
   3194  *
   3195  *	Compute the hash of the multicast address for the 4096-bit
   3196  *	multicast filter.
   3197  */
   3198 static uint32_t
   3199 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3200 {
   3201 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3202 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3203 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3204 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3205 	uint32_t hash;
   3206 
   3207 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3208 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3209 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3210 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3211 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3212 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3213 		return (hash & 0x3ff);
   3214 	}
   3215 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3216 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3217 
   3218 	return (hash & 0xfff);
   3219 }
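
/*
 * Example (assuming sc_mchash_type == 0 on a non-ICH/PCH chip): for the
 * multicast address 01:00:5e:00:00:01, enaddr[4] == 0x00 and
 * enaddr[5] == 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.  The
 * caller then selects MTA word hash >> 5 == 0 and bit hash & 0x1f == 16
 * within it.
 */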
   3220 
   3221 /*
   3222  * wm_set_filter:
   3223  *
   3224  *	Set up the receive filter.
   3225  */
   3226 static void
   3227 wm_set_filter(struct wm_softc *sc)
   3228 {
   3229 	struct ethercom *ec = &sc->sc_ethercom;
   3230 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3231 	struct ether_multi *enm;
   3232 	struct ether_multistep step;
   3233 	bus_addr_t mta_reg;
   3234 	uint32_t hash, reg, bit;
   3235 	int i, size, ralmax;
   3236 
   3237 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3238 		device_xname(sc->sc_dev), __func__));
   3239 
   3240 	if (sc->sc_type >= WM_T_82544)
   3241 		mta_reg = WMREG_CORDOVA_MTA;
   3242 	else
   3243 		mta_reg = WMREG_MTA;
   3244 
   3245 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3246 
   3247 	if (ifp->if_flags & IFF_BROADCAST)
   3248 		sc->sc_rctl |= RCTL_BAM;
   3249 	if (ifp->if_flags & IFF_PROMISC) {
   3250 		sc->sc_rctl |= RCTL_UPE;
   3251 		goto allmulti;
   3252 	}
   3253 
   3254 	/*
   3255 	 * Set the station address in the first RAL slot, and
   3256 	 * clear the remaining slots.
   3257 	 */
   3258 	if (sc->sc_type == WM_T_ICH8)
   3259 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3260 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3261 	    || (sc->sc_type == WM_T_PCH))
   3262 		size = WM_RAL_TABSIZE_ICH8;
   3263 	else if (sc->sc_type == WM_T_PCH2)
   3264 		size = WM_RAL_TABSIZE_PCH2;
   3265 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3266 		size = WM_RAL_TABSIZE_PCH_LPT;
   3267 	else if (sc->sc_type == WM_T_82575)
   3268 		size = WM_RAL_TABSIZE_82575;
   3269 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3270 		size = WM_RAL_TABSIZE_82576;
   3271 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3272 		size = WM_RAL_TABSIZE_I350;
   3273 	else
   3274 		size = WM_RAL_TABSIZE;
   3275 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3276 
   3277 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3278 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3279 		switch (i) {
   3280 		case 0:
   3281 			/* We can use all entries */
   3282 			ralmax = size;
   3283 			break;
   3284 		case 1:
   3285 			/* Only RAR[0] */
   3286 			ralmax = 1;
   3287 			break;
   3288 		default:
   3289 			/* available SHRA + RAR[0] */
   3290 			ralmax = i + 1;
   3291 		}
   3292 	} else
   3293 		ralmax = size;
   3294 	for (i = 1; i < size; i++) {
   3295 		if (i < ralmax)
   3296 			wm_set_ral(sc, NULL, i);
   3297 	}
   3298 
   3299 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3300 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3301 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3302 	    || (sc->sc_type == WM_T_PCH_SPT))
   3303 		size = WM_ICH8_MC_TABSIZE;
   3304 	else
   3305 		size = WM_MC_TABSIZE;
   3306 	/* Clear out the multicast table. */
   3307 	for (i = 0; i < size; i++)
   3308 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3309 
   3310 	ETHER_LOCK(ec);
   3311 	ETHER_FIRST_MULTI(step, ec, enm);
   3312 	while (enm != NULL) {
   3313 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3314 			ETHER_UNLOCK(ec);
   3315 			/*
   3316 			 * We must listen to a range of multicast addresses.
   3317 			 * For now, just accept all multicasts, rather than
   3318 			 * trying to set only those filter bits needed to match
   3319 			 * the range.  (At this time, the only use of address
   3320 			 * ranges is for IP multicast routing, for which the
   3321 			 * range is big enough to require all bits set.)
   3322 			 */
   3323 			goto allmulti;
   3324 		}
   3325 
   3326 		hash = wm_mchash(sc, enm->enm_addrlo);
   3327 
   3328 		reg = (hash >> 5);
   3329 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3330 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3331 		    || (sc->sc_type == WM_T_PCH2)
   3332 		    || (sc->sc_type == WM_T_PCH_LPT)
   3333 		    || (sc->sc_type == WM_T_PCH_SPT))
   3334 			reg &= 0x1f;
   3335 		else
   3336 			reg &= 0x7f;
   3337 		bit = hash & 0x1f;
   3338 
   3339 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3340 		hash |= 1U << bit;
   3341 
   3342 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3343 			/*
   3344 			 * 82544 Errata 9: Certain register cannot be written
   3345 			 * with particular alignments in PCI-X bus operation
   3346 			 * (FCAH, MTA and VFTA).
   3347 			 */
   3348 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3349 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3350 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3351 		} else
   3352 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3353 
   3354 		ETHER_NEXT_MULTI(step, enm);
   3355 	}
   3356 	ETHER_UNLOCK(ec);
   3357 
   3358 	ifp->if_flags &= ~IFF_ALLMULTI;
   3359 	goto setit;
   3360 
   3361  allmulti:
   3362 	ifp->if_flags |= IFF_ALLMULTI;
   3363 	sc->sc_rctl |= RCTL_MPE;
   3364 
   3365  setit:
   3366 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3367 }
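
/*
 * Recap of the filter layout programmed above: perfect-match filtering
 * uses the RAL/RAR slots (station address in slot 0); addresses that
 * don't fit, and any multicast range, fall back to the hash table
 * (4096 bits, or 1024 bits on ICH/PCH) or, failing that, to
 * ALLMULTI/promiscuous via RCTL_MPE/RCTL_UPE.
 */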
   3368 
   3369 /* Reset and init related */
   3370 
   3371 static void
   3372 wm_set_vlan(struct wm_softc *sc)
   3373 {
   3374 
   3375 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3376 		device_xname(sc->sc_dev), __func__));
   3377 
   3378 	/* Deal with VLAN enables. */
   3379 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3380 		sc->sc_ctrl |= CTRL_VME;
   3381 	else
   3382 		sc->sc_ctrl &= ~CTRL_VME;
   3383 
   3384 	/* Write the control registers. */
   3385 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3386 }
   3387 
   3388 static void
   3389 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3390 {
   3391 	uint32_t gcr;
   3392 	pcireg_t ctrl2;
   3393 
   3394 	gcr = CSR_READ(sc, WMREG_GCR);
   3395 
   3396 	/* Only take action if timeout value is defaulted to 0 */
   3397 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3398 		goto out;
   3399 
   3400 	if ((gcr & GCR_CAP_VER2) == 0) {
   3401 		gcr |= GCR_CMPL_TMOUT_10MS;
   3402 		goto out;
   3403 	}
   3404 
   3405 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3406 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3407 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3408 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3409 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3410 
   3411 out:
   3412 	/* Disable completion timeout resend */
   3413 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3414 
   3415 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3416 }
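
/*
 * Sketch of the intent above (as understood from the code, not a spec
 * citation): devices that only implement PCIe capability version 1 get
 * an internal ~10ms completion timeout via GCR, while version-2 devices
 * are programmed through the standard Device Control 2 register to a
 * 16ms completion timeout range instead.
 */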
   3417 
   3418 void
   3419 wm_get_auto_rd_done(struct wm_softc *sc)
   3420 {
   3421 	int i;
   3422 
   3423 	/* wait for eeprom to reload */
   3424 	switch (sc->sc_type) {
   3425 	case WM_T_82571:
   3426 	case WM_T_82572:
   3427 	case WM_T_82573:
   3428 	case WM_T_82574:
   3429 	case WM_T_82583:
   3430 	case WM_T_82575:
   3431 	case WM_T_82576:
   3432 	case WM_T_82580:
   3433 	case WM_T_I350:
   3434 	case WM_T_I354:
   3435 	case WM_T_I210:
   3436 	case WM_T_I211:
   3437 	case WM_T_80003:
   3438 	case WM_T_ICH8:
   3439 	case WM_T_ICH9:
   3440 		for (i = 0; i < 10; i++) {
   3441 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3442 				break;
   3443 			delay(1000);
   3444 		}
   3445 		if (i == 10) {
   3446 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3447 			    "complete\n", device_xname(sc->sc_dev));
   3448 		}
   3449 		break;
   3450 	default:
   3451 		break;
   3452 	}
   3453 }
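
/*
 * The poll above allows roughly 10 * 1000us = 10ms for the EEPROM
 * auto-read to complete before the failure is logged; the function
 * continues either way.
 */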
   3454 
   3455 void
   3456 wm_lan_init_done(struct wm_softc *sc)
   3457 {
   3458 	uint32_t reg = 0;
   3459 	int i;
   3460 
   3461 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3462 		device_xname(sc->sc_dev), __func__));
   3463 
   3464 	/* Wait for eeprom to reload */
   3465 	switch (sc->sc_type) {
   3466 	case WM_T_ICH10:
   3467 	case WM_T_PCH:
   3468 	case WM_T_PCH2:
   3469 	case WM_T_PCH_LPT:
   3470 	case WM_T_PCH_SPT:
   3471 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3472 			reg = CSR_READ(sc, WMREG_STATUS);
   3473 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3474 				break;
   3475 			delay(100);
   3476 		}
   3477 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3478 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3479 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3480 		}
   3481 		break;
   3482 	default:
   3483 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3484 		    __func__);
   3485 		break;
   3486 	}
   3487 
   3488 	reg &= ~STATUS_LAN_INIT_DONE;
   3489 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3490 }
   3491 
   3492 void
   3493 wm_get_cfg_done(struct wm_softc *sc)
   3494 {
   3495 	int mask;
   3496 	uint32_t reg;
   3497 	int i;
   3498 
   3499 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3500 		device_xname(sc->sc_dev), __func__));
   3501 
   3502 	/* Wait for eeprom to reload */
   3503 	switch (sc->sc_type) {
   3504 	case WM_T_82542_2_0:
   3505 	case WM_T_82542_2_1:
   3506 		/* null */
   3507 		break;
   3508 	case WM_T_82543:
   3509 	case WM_T_82544:
   3510 	case WM_T_82540:
   3511 	case WM_T_82545:
   3512 	case WM_T_82545_3:
   3513 	case WM_T_82546:
   3514 	case WM_T_82546_3:
   3515 	case WM_T_82541:
   3516 	case WM_T_82541_2:
   3517 	case WM_T_82547:
   3518 	case WM_T_82547_2:
   3519 	case WM_T_82573:
   3520 	case WM_T_82574:
   3521 	case WM_T_82583:
   3522 		/* generic */
   3523 		delay(10*1000);
   3524 		break;
   3525 	case WM_T_80003:
   3526 	case WM_T_82571:
   3527 	case WM_T_82572:
   3528 	case WM_T_82575:
   3529 	case WM_T_82576:
   3530 	case WM_T_82580:
   3531 	case WM_T_I350:
   3532 	case WM_T_I354:
   3533 	case WM_T_I210:
   3534 	case WM_T_I211:
   3535 		if (sc->sc_type == WM_T_82571) {
   3536 			/* Only 82571 shares port 0 */
   3537 			mask = EEMNGCTL_CFGDONE_0;
   3538 		} else
   3539 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3540 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3541 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3542 				break;
   3543 			delay(1000);
   3544 		}
   3545 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3546 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3547 				device_xname(sc->sc_dev), __func__));
   3548 		}
   3549 		break;
   3550 	case WM_T_ICH8:
   3551 	case WM_T_ICH9:
   3552 	case WM_T_ICH10:
   3553 	case WM_T_PCH:
   3554 	case WM_T_PCH2:
   3555 	case WM_T_PCH_LPT:
   3556 	case WM_T_PCH_SPT:
   3557 		delay(10*1000);
   3558 		if (sc->sc_type >= WM_T_ICH10)
   3559 			wm_lan_init_done(sc);
   3560 		else
   3561 			wm_get_auto_rd_done(sc);
   3562 
   3563 		reg = CSR_READ(sc, WMREG_STATUS);
   3564 		if ((reg & STATUS_PHYRA) != 0)
   3565 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3566 		break;
   3567 	default:
   3568 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3569 		    __func__);
   3570 		break;
   3571 	}
   3572 }
   3573 
   3574 /* Init hardware bits */
   3575 void
   3576 wm_initialize_hardware_bits(struct wm_softc *sc)
   3577 {
   3578 	uint32_t tarc0, tarc1, reg;
   3579 
   3580 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3581 		device_xname(sc->sc_dev), __func__));
   3582 
   3583 	/* For 82571 variant, 80003 and ICHs */
   3584 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3585 	    || (sc->sc_type >= WM_T_80003)) {
   3586 
   3587 		/* Transmit Descriptor Control 0 */
   3588 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3589 		reg |= TXDCTL_COUNT_DESC;
   3590 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3591 
   3592 		/* Transmit Descriptor Control 1 */
   3593 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3594 		reg |= TXDCTL_COUNT_DESC;
   3595 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3596 
   3597 		/* TARC0 */
   3598 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3599 		switch (sc->sc_type) {
   3600 		case WM_T_82571:
   3601 		case WM_T_82572:
   3602 		case WM_T_82573:
   3603 		case WM_T_82574:
   3604 		case WM_T_82583:
   3605 		case WM_T_80003:
   3606 			/* Clear bits 30..27 */
   3607 			tarc0 &= ~__BITS(30, 27);
   3608 			break;
   3609 		default:
   3610 			break;
   3611 		}
   3612 
   3613 		switch (sc->sc_type) {
   3614 		case WM_T_82571:
   3615 		case WM_T_82572:
   3616 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3617 
   3618 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3619 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3620 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3621 			/* 8257[12] Errata No.7 */
   3622 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   3623 
   3624 			/* TARC1 bit 28 */
   3625 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3626 				tarc1 &= ~__BIT(28);
   3627 			else
   3628 				tarc1 |= __BIT(28);
   3629 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3630 
   3631 			/*
   3632 			 * 8257[12] Errata No.13
   3633 			 * Disable Dynamic Clock Gating.
   3634 			 */
   3635 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3636 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3637 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3638 			break;
   3639 		case WM_T_82573:
   3640 		case WM_T_82574:
   3641 		case WM_T_82583:
   3642 			if ((sc->sc_type == WM_T_82574)
   3643 			    || (sc->sc_type == WM_T_82583))
   3644 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3645 
   3646 			/* Extended Device Control */
   3647 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3648 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3649 			reg |= __BIT(22);	/* Set bit 22 */
   3650 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3651 
   3652 			/* Device Control */
   3653 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3654 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3655 
   3656 			/* PCIe Control Register */
   3657 			/*
   3658 			 * 82573 Errata (unknown).
   3659 			 *
   3660 			 * 82574 Errata 25 and 82583 Errata 12
   3661 			 * "Dropped Rx Packets":
   3662 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   3663 			 */
   3664 			reg = CSR_READ(sc, WMREG_GCR);
   3665 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3666 			CSR_WRITE(sc, WMREG_GCR, reg);
   3667 
   3668 			if ((sc->sc_type == WM_T_82574)
   3669 			    || (sc->sc_type == WM_T_82583)) {
   3670 				/*
   3671 				 * Document says this bit must be set for
   3672 				 * proper operation.
   3673 				 */
   3674 				reg = CSR_READ(sc, WMREG_GCR);
   3675 				reg |= __BIT(22);
   3676 				CSR_WRITE(sc, WMREG_GCR, reg);
   3677 
   3678 				/*
   3679 				 * Apply a workaround for the hardware errata
   3680 				 * documented in the errata docs.  It fixes an
   3681 				 * issue where some error-prone or unreliable
   3682 				 * PCIe completions occur, particularly with
   3683 				 * ASPM enabled.  Without the fix, the issue
   3684 				 * can cause Tx timeouts.
   3685 				 */
   3686 				reg = CSR_READ(sc, WMREG_GCR2);
   3687 				reg |= __BIT(0);
   3688 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3689 			}
   3690 			break;
   3691 		case WM_T_80003:
   3692 			/* TARC0 */
   3693 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3694 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3695 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   3696 
   3697 			/* TARC1 bit 28 */
   3698 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3699 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3700 				tarc1 &= ~__BIT(28);
   3701 			else
   3702 				tarc1 |= __BIT(28);
   3703 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3704 			break;
   3705 		case WM_T_ICH8:
   3706 		case WM_T_ICH9:
   3707 		case WM_T_ICH10:
   3708 		case WM_T_PCH:
   3709 		case WM_T_PCH2:
   3710 		case WM_T_PCH_LPT:
   3711 		case WM_T_PCH_SPT:
   3712 			/* TARC0 */
   3713 			if ((sc->sc_type == WM_T_ICH8)
   3714 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3715 				/* Set TARC0 bits 29 and 28 */
   3716 				tarc0 |= __BITS(29, 28);
   3717 			}
   3718 			/* Set TARC0 bits 23,24,26,27 */
   3719 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3720 
   3721 			/* CTRL_EXT */
   3722 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3723 			reg |= __BIT(22);	/* Set bit 22 */
   3724 			/*
   3725 			 * Enable PHY low-power state when MAC is at D3
   3726 			 * w/o WoL
   3727 			 */
   3728 			if (sc->sc_type >= WM_T_PCH)
   3729 				reg |= CTRL_EXT_PHYPDEN;
   3730 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3731 
   3732 			/* TARC1 */
   3733 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3734 			/* bit 28 */
   3735 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3736 				tarc1 &= ~__BIT(28);
   3737 			else
   3738 				tarc1 |= __BIT(28);
   3739 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3740 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3741 
   3742 			/* Device Status */
   3743 			if (sc->sc_type == WM_T_ICH8) {
   3744 				reg = CSR_READ(sc, WMREG_STATUS);
   3745 				reg &= ~__BIT(31);
   3746 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3747 
   3748 			}
   3749 
   3750 			/* IOSFPC */
   3751 			if (sc->sc_type == WM_T_PCH_SPT) {
   3752 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3753 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   3754 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3755 			}
   3756 			/*
   3757 			 * Work-around descriptor data corruption issue during
   3758 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3759 			 * capability.
   3760 			 */
   3761 			reg = CSR_READ(sc, WMREG_RFCTL);
   3762 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3763 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3764 			break;
   3765 		default:
   3766 			break;
   3767 		}
   3768 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3769 
   3770 		switch (sc->sc_type) {
   3771 		/*
   3772 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3773 		 * Avoid RSS Hash Value bug.
   3774 		 */
   3775 		case WM_T_82571:
   3776 		case WM_T_82572:
   3777 		case WM_T_82573:
   3778 		case WM_T_80003:
   3779 		case WM_T_ICH8:
   3780 			reg = CSR_READ(sc, WMREG_RFCTL);
   3781 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3782 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3783 			break;
   3784 		case WM_T_82574:
   3785 			/* Use the extended Rx descriptor format. */
   3786 			reg = CSR_READ(sc, WMREG_RFCTL);
   3787 			reg |= WMREG_RFCTL_EXSTEN;
   3788 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3789 			break;
   3790 		default:
   3791 			break;
   3792 		}
   3793 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3794 		/*
   3795 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3796 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3797 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3798 		 * Correctly by the Device"
   3799 		 *
   3800 		 * I354(C2000) Errata AVR53:
   3801 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3802 		 * Hang"
   3803 		 */
   3804 		reg = CSR_READ(sc, WMREG_RFCTL);
   3805 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3806 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3807 	}
   3808 }
   3809 
   3810 static uint32_t
   3811 wm_rxpbs_adjust_82580(uint32_t val)
   3812 {
   3813 	uint32_t rv = 0;
   3814 
   3815 	if (val < __arraycount(wm_82580_rxpbs_table))
   3816 		rv = wm_82580_rxpbs_table[val];
   3817 
   3818 	return rv;
   3819 }
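
/*
 * Note (an assumption about the table's intent): on 82580-class chips
 * the raw RXPBS field is an index rather than a byte count, so e.g. a
 * value of 2 is translated through wm_82580_rxpbs_table[2] to the
 * actual packet buffer size; out-of-range values fall back to 0.
 */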
   3820 
   3821 /*
   3822  * wm_reset_phy:
   3823  *
   3824  *	generic PHY reset function.
   3825  *	Same as e1000_phy_hw_reset_generic()
   3826  */
   3827 static void
   3828 wm_reset_phy(struct wm_softc *sc)
   3829 {
   3830 	uint32_t reg;
   3831 
   3832 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3833 		device_xname(sc->sc_dev), __func__));
   3834 	if (wm_phy_resetisblocked(sc))
   3835 		return;
   3836 
   3837 	sc->phy.acquire(sc);
   3838 
   3839 	reg = CSR_READ(sc, WMREG_CTRL);
   3840 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3841 	CSR_WRITE_FLUSH(sc);
   3842 
   3843 	delay(sc->phy.reset_delay_us);
   3844 
   3845 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3846 	CSR_WRITE_FLUSH(sc);
   3847 
   3848 	delay(150);
   3849 
   3850 	sc->phy.release(sc);
   3851 
   3852 	wm_get_cfg_done(sc);
   3853 }
   3854 
   3855 static void
   3856 wm_flush_desc_rings(struct wm_softc *sc)
   3857 {
   3858 	pcireg_t preg;
   3859 	uint32_t reg;
   3860 	int nexttx;
   3861 
   3862 	/* First, disable MULR fix in FEXTNVM11 */
   3863 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3864 	reg |= FEXTNVM11_DIS_MULRFIX;
   3865 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3866 
   3867 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3868 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3869 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3870 		struct wm_txqueue *txq;
   3871 		wiseman_txdesc_t *txd;
   3872 
   3873 		/* TX */
   3874 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3875 		    device_xname(sc->sc_dev), preg, reg);
   3876 		reg = CSR_READ(sc, WMREG_TCTL);
   3877 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3878 
   3879 		txq = &sc->sc_queue[0].wmq_txq;
   3880 		nexttx = txq->txq_next;
   3881 		txd = &txq->txq_descs[nexttx];
   3882 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   3883 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3884 		txd->wtx_fields.wtxu_status = 0;
   3885 		txd->wtx_fields.wtxu_options = 0;
   3886 		txd->wtx_fields.wtxu_vlan = 0;
   3887 
   3888 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3889 			BUS_SPACE_BARRIER_WRITE);
   3890 
   3891 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3892 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3893 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3894 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3895 		delay(250);
   3896 	}
   3897 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3898 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3899 		uint32_t rctl;
   3900 
   3901 		/* RX */
   3902 		printf("%s: Need RX flush (reg = %08x)\n",
   3903 		    device_xname(sc->sc_dev), preg);
   3904 		rctl = CSR_READ(sc, WMREG_RCTL);
   3905 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3906 		CSR_WRITE_FLUSH(sc);
   3907 		delay(150);
   3908 
   3909 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3910 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3911 		reg &= 0xffffc000;
   3912 		/*
   3913 		 * update thresholds: prefetch threshold to 31, host threshold
   3914 		 * to 1 and make sure the granularity is "descriptors" and not
   3915 		 * "cache lines"
   3916 		 */
   3917 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3918 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3919 
   3920 		/*
   3921 		 * momentarily enable the RX ring for the changes to take
   3922 		 * effect
   3923 		 */
   3924 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3925 		CSR_WRITE_FLUSH(sc);
   3926 		delay(150);
   3927 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3928 	}
   3929 }
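
/*
 * Rationale (derived from the code above, not a datasheet quote):
 * queueing one harmless 512-byte descriptor and bumping TDT forces the
 * DMA engine to drain whatever descriptors were left in flight, and
 * briefly toggling RCTL_EN with tightened RXDCTL thresholds does the
 * same for the RX ring, satisfying DESCRING_STATUS_FLUSH_REQ before
 * the actual reset.
 */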
   3930 
   3931 /*
   3932  * wm_reset:
   3933  *
   3934  *	Reset the i82542 chip.
   3935  */
   3936 static void
   3937 wm_reset(struct wm_softc *sc)
   3938 {
   3939 	int phy_reset = 0;
   3940 	int i, error = 0;
   3941 	uint32_t reg;
   3942 
   3943 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3944 		device_xname(sc->sc_dev), __func__));
   3945 	KASSERT(sc->sc_type != 0);
   3946 
   3947 	/*
   3948 	 * Allocate on-chip memory according to the MTU size.
   3949 	 * The Packet Buffer Allocation register must be written
   3950 	 * before the chip is reset.
   3951 	 */
   3952 	switch (sc->sc_type) {
   3953 	case WM_T_82547:
   3954 	case WM_T_82547_2:
   3955 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3956 		    PBA_22K : PBA_30K;
   3957 		for (i = 0; i < sc->sc_nqueues; i++) {
   3958 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3959 			txq->txq_fifo_head = 0;
   3960 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3961 			txq->txq_fifo_size =
   3962 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3963 			txq->txq_fifo_stall = 0;
   3964 		}
   3965 		break;
   3966 	case WM_T_82571:
   3967 	case WM_T_82572:
   3968 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3969 	case WM_T_80003:
   3970 		sc->sc_pba = PBA_32K;
   3971 		break;
   3972 	case WM_T_82573:
   3973 		sc->sc_pba = PBA_12K;
   3974 		break;
   3975 	case WM_T_82574:
   3976 	case WM_T_82583:
   3977 		sc->sc_pba = PBA_20K;
   3978 		break;
   3979 	case WM_T_82576:
   3980 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3981 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3982 		break;
   3983 	case WM_T_82580:
   3984 	case WM_T_I350:
   3985 	case WM_T_I354:
   3986 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3987 		break;
   3988 	case WM_T_I210:
   3989 	case WM_T_I211:
   3990 		sc->sc_pba = PBA_34K;
   3991 		break;
   3992 	case WM_T_ICH8:
   3993 		/* Workaround for a bit corruption issue in FIFO memory */
   3994 		sc->sc_pba = PBA_8K;
   3995 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3996 		break;
   3997 	case WM_T_ICH9:
   3998 	case WM_T_ICH10:
   3999 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4000 		    PBA_14K : PBA_10K;
   4001 		break;
   4002 	case WM_T_PCH:
   4003 	case WM_T_PCH2:
   4004 	case WM_T_PCH_LPT:
   4005 	case WM_T_PCH_SPT:
   4006 		sc->sc_pba = PBA_26K;
   4007 		break;
   4008 	default:
   4009 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4010 		    PBA_40K : PBA_48K;
   4011 		break;
   4012 	}
   4013 	/*
   4014 	 * Only old or non-multiqueue devices have the PBA register.
   4015 	 * XXX Need special handling for 82575.
   4016 	 */
   4017 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4018 	    || (sc->sc_type == WM_T_82575))
   4019 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4020 
   4021 	/* Prevent the PCI-E bus from sticking */
   4022 	if (sc->sc_flags & WM_F_PCIE) {
   4023 		int timeout = 800;
   4024 
   4025 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4026 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4027 
   4028 		while (timeout--) {
   4029 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4030 			    == 0)
   4031 				break;
   4032 			delay(100);
   4033 		}
   4034 	}
   4035 
   4036 	/* Set the completion timeout for interface */
   4037 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4038 	    || (sc->sc_type == WM_T_82580)
   4039 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4040 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4041 		wm_set_pcie_completion_timeout(sc);
   4042 
   4043 	/* Clear interrupt */
   4044 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4045 	if (sc->sc_nintrs > 1) {
   4046 		if (sc->sc_type != WM_T_82574) {
   4047 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4048 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4049 		} else {
   4050 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4051 		}
   4052 	}
   4053 
   4054 	/* Stop the transmit and receive processes. */
   4055 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4056 	sc->sc_rctl &= ~RCTL_EN;
   4057 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4058 	CSR_WRITE_FLUSH(sc);
   4059 
   4060 	/* XXX set_tbi_sbp_82543() */
   4061 
   4062 	delay(10*1000);
   4063 
   4064 	/* Must acquire the MDIO ownership before MAC reset */
   4065 	switch (sc->sc_type) {
   4066 	case WM_T_82573:
   4067 	case WM_T_82574:
   4068 	case WM_T_82583:
   4069 		error = wm_get_hw_semaphore_82573(sc);
   4070 		break;
   4071 	default:
   4072 		break;
   4073 	}
   4074 
   4075 	/*
   4076 	 * 82541 Errata 29? & 82547 Errata 28?
   4077 	 * See also the description about PHY_RST bit in CTRL register
   4078 	 * in 8254x_GBe_SDM.pdf.
   4079 	 */
   4080 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4081 		CSR_WRITE(sc, WMREG_CTRL,
   4082 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4083 		CSR_WRITE_FLUSH(sc);
   4084 		delay(5000);
   4085 	}
   4086 
   4087 	switch (sc->sc_type) {
   4088 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4089 	case WM_T_82541:
   4090 	case WM_T_82541_2:
   4091 	case WM_T_82547:
   4092 	case WM_T_82547_2:
   4093 		/*
   4094 		 * On some chipsets, a reset through a memory-mapped write
   4095 		 * cycle can cause the chip to reset before completing the
   4096 		 * write cycle.  This causes major headache that can be
   4097 		 * avoided by issuing the reset via indirect register writes
   4098 		 * through I/O space.
   4099 		 *
   4100 		 * So, if we successfully mapped the I/O BAR at attach time,
   4101 		 * use that.  Otherwise, try our luck with a memory-mapped
   4102 		 * reset.
   4103 		 */
   4104 		if (sc->sc_flags & WM_F_IOH_VALID)
   4105 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4106 		else
   4107 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4108 		break;
   4109 	case WM_T_82545_3:
   4110 	case WM_T_82546_3:
   4111 		/* Use the shadow control register on these chips. */
   4112 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4113 		break;
   4114 	case WM_T_80003:
   4115 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4116 		sc->phy.acquire(sc);
   4117 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4118 		sc->phy.release(sc);
   4119 		break;
   4120 	case WM_T_ICH8:
   4121 	case WM_T_ICH9:
   4122 	case WM_T_ICH10:
   4123 	case WM_T_PCH:
   4124 	case WM_T_PCH2:
   4125 	case WM_T_PCH_LPT:
   4126 	case WM_T_PCH_SPT:
   4127 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4128 		if (wm_phy_resetisblocked(sc) == false) {
   4129 			/*
   4130 			 * Gate automatic PHY configuration by hardware on
   4131 			 * non-managed 82579
   4132 			 */
   4133 			if ((sc->sc_type == WM_T_PCH2)
   4134 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4135 				== 0))
   4136 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4137 
   4138 			reg |= CTRL_PHY_RESET;
   4139 			phy_reset = 1;
   4140 		} else
   4141 			printf("XXX reset is blocked!!!\n");
   4142 		sc->phy.acquire(sc);
   4143 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4144 		/* Don't insert a completion barrier while resetting */
   4145 		delay(20*1000);
   4146 		mutex_exit(sc->sc_ich_phymtx);
   4147 		break;
   4148 	case WM_T_82580:
   4149 	case WM_T_I350:
   4150 	case WM_T_I354:
   4151 	case WM_T_I210:
   4152 	case WM_T_I211:
   4153 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4154 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4155 			CSR_WRITE_FLUSH(sc);
   4156 		delay(5000);
   4157 		break;
   4158 	case WM_T_82542_2_0:
   4159 	case WM_T_82542_2_1:
   4160 	case WM_T_82543:
   4161 	case WM_T_82540:
   4162 	case WM_T_82545:
   4163 	case WM_T_82546:
   4164 	case WM_T_82571:
   4165 	case WM_T_82572:
   4166 	case WM_T_82573:
   4167 	case WM_T_82574:
   4168 	case WM_T_82575:
   4169 	case WM_T_82576:
   4170 	case WM_T_82583:
   4171 	default:
   4172 		/* Everything else can safely use the documented method. */
   4173 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4174 		break;
   4175 	}
   4176 
   4177 	/* Must release the MDIO ownership after MAC reset */
   4178 	switch (sc->sc_type) {
   4179 	case WM_T_82573:
   4180 	case WM_T_82574:
   4181 	case WM_T_82583:
   4182 		if (error == 0)
   4183 			wm_put_hw_semaphore_82573(sc);
   4184 		break;
   4185 	default:
   4186 		break;
   4187 	}
   4188 
   4189 	if (phy_reset != 0)
   4190 		wm_get_cfg_done(sc);
   4191 
   4192 	/* reload EEPROM */
   4193 	switch (sc->sc_type) {
   4194 	case WM_T_82542_2_0:
   4195 	case WM_T_82542_2_1:
   4196 	case WM_T_82543:
   4197 	case WM_T_82544:
   4198 		delay(10);
   4199 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4200 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4201 		CSR_WRITE_FLUSH(sc);
   4202 		delay(2000);
   4203 		break;
   4204 	case WM_T_82540:
   4205 	case WM_T_82545:
   4206 	case WM_T_82545_3:
   4207 	case WM_T_82546:
   4208 	case WM_T_82546_3:
   4209 		delay(5*1000);
   4210 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4211 		break;
   4212 	case WM_T_82541:
   4213 	case WM_T_82541_2:
   4214 	case WM_T_82547:
   4215 	case WM_T_82547_2:
   4216 		delay(20000);
   4217 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4218 		break;
   4219 	case WM_T_82571:
   4220 	case WM_T_82572:
   4221 	case WM_T_82573:
   4222 	case WM_T_82574:
   4223 	case WM_T_82583:
   4224 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4225 			delay(10);
   4226 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4227 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4228 			CSR_WRITE_FLUSH(sc);
   4229 		}
   4230 		/* check EECD_EE_AUTORD */
   4231 		wm_get_auto_rd_done(sc);
   4232 		/*
   4233 		 * PHY configuration from the NVM starts just after
   4234 		 * EECD_AUTO_RD is set.
   4235 		 */
   4236 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4237 		    || (sc->sc_type == WM_T_82583))
   4238 			delay(25*1000);
   4239 		break;
   4240 	case WM_T_82575:
   4241 	case WM_T_82576:
   4242 	case WM_T_82580:
   4243 	case WM_T_I350:
   4244 	case WM_T_I354:
   4245 	case WM_T_I210:
   4246 	case WM_T_I211:
   4247 	case WM_T_80003:
   4248 		/* check EECD_EE_AUTORD */
   4249 		wm_get_auto_rd_done(sc);
   4250 		break;
   4251 	case WM_T_ICH8:
   4252 	case WM_T_ICH9:
   4253 	case WM_T_ICH10:
   4254 	case WM_T_PCH:
   4255 	case WM_T_PCH2:
   4256 	case WM_T_PCH_LPT:
   4257 	case WM_T_PCH_SPT:
   4258 		break;
   4259 	default:
   4260 		panic("%s: unknown type\n", __func__);
   4261 	}
   4262 
   4263 	/* Check whether EEPROM is present or not */
   4264 	switch (sc->sc_type) {
   4265 	case WM_T_82575:
   4266 	case WM_T_82576:
   4267 	case WM_T_82580:
   4268 	case WM_T_I350:
   4269 	case WM_T_I354:
   4270 	case WM_T_ICH8:
   4271 	case WM_T_ICH9:
   4272 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4273 			/* Not found */
   4274 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4275 			if (sc->sc_type == WM_T_82575)
   4276 				wm_reset_init_script_82575(sc);
   4277 		}
   4278 		break;
   4279 	default:
   4280 		break;
   4281 	}
   4282 
   4283 	if ((sc->sc_type == WM_T_82580)
   4284 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4285 		/* clear global device reset status bit */
   4286 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4287 	}
   4288 
   4289 	/* Clear any pending interrupt events. */
   4290 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4291 	reg = CSR_READ(sc, WMREG_ICR);
   4292 	if (sc->sc_nintrs > 1) {
   4293 		if (sc->sc_type != WM_T_82574) {
   4294 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4295 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4296 		} else
   4297 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4298 	}
   4299 
   4300 	/* reload sc_ctrl */
   4301 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4302 
   4303 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4304 		wm_set_eee_i350(sc);
   4305 
   4306 	/* Clear the host wakeup bit after lcd reset */
   4307 	if (sc->sc_type >= WM_T_PCH) {
   4308 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4309 		    BM_PORT_GEN_CFG);
   4310 		reg &= ~BM_WUC_HOST_WU_BIT;
   4311 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4312 		    BM_PORT_GEN_CFG, reg);
   4313 	}
   4314 
   4315 	/*
   4316 	 * For PCH, this write will make sure that any noise will be detected
   4317 	 * as a CRC error and be dropped rather than show up as a bad packet
   4318 	 * to the DMA engine
   4319 	 */
   4320 	if (sc->sc_type == WM_T_PCH)
   4321 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4322 
   4323 	if (sc->sc_type >= WM_T_82544)
   4324 		CSR_WRITE(sc, WMREG_WUC, 0);
   4325 
   4326 	wm_reset_mdicnfg_82580(sc);
   4327 
   4328 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4329 		wm_pll_workaround_i210(sc);
   4330 }
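
/*
 * Summary of the sequence above (a reading of the code, not a spec
 * citation): size the packet buffer, quiesce DMA and interrupts, issue
 * the chip-specific MAC/PHY reset, wait for the NVM auto-load to
 * finish, then clear wakeup state and re-apply the per-chip
 * workarounds.
 */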
   4331 
   4332 /*
   4333  * wm_add_rxbuf:
   4334  *
   4335  *	Add a receive buffer to the indicated descriptor.
   4336  */
   4337 static int
   4338 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4339 {
   4340 	struct wm_softc *sc = rxq->rxq_sc;
   4341 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4342 	struct mbuf *m;
   4343 	int error;
   4344 
   4345 	KASSERT(mutex_owned(rxq->rxq_lock));
   4346 
   4347 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4348 	if (m == NULL)
   4349 		return ENOBUFS;
   4350 
   4351 	MCLGET(m, M_DONTWAIT);
   4352 	if ((m->m_flags & M_EXT) == 0) {
   4353 		m_freem(m);
   4354 		return ENOBUFS;
   4355 	}
   4356 
   4357 	if (rxs->rxs_mbuf != NULL)
   4358 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4359 
   4360 	rxs->rxs_mbuf = m;
   4361 
   4362 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4363 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4364 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4365 	if (error) {
   4366 		/* XXX XXX XXX */
   4367 		aprint_error_dev(sc->sc_dev,
   4368 		    "unable to load rx DMA map %d, error = %d\n",
   4369 		    idx, error);
   4370 		panic("wm_add_rxbuf");
   4371 	}
   4372 
   4373 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4374 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4375 
   4376 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4377 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4378 			wm_init_rxdesc(rxq, idx);
   4379 	} else
   4380 		wm_init_rxdesc(rxq, idx);
   4381 
   4382 	return 0;
   4383 }
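
/*
 * Note on the tail of wm_add_rxbuf() (an inference from the code): on
 * WM_F_NEWQUEUE devices the descriptor is only re-armed here while the
 * receiver is running (RCTL_EN set); during initialization the
 * descriptors are presumably armed later by the ring setup path.
 */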
   4384 
   4385 /*
   4386  * wm_rxdrain:
   4387  *
   4388  *	Drain the receive queue.
   4389  */
   4390 static void
   4391 wm_rxdrain(struct wm_rxqueue *rxq)
   4392 {
   4393 	struct wm_softc *sc = rxq->rxq_sc;
   4394 	struct wm_rxsoft *rxs;
   4395 	int i;
   4396 
   4397 	KASSERT(mutex_owned(rxq->rxq_lock));
   4398 
   4399 	for (i = 0; i < WM_NRXDESC; i++) {
   4400 		rxs = &rxq->rxq_soft[i];
   4401 		if (rxs->rxs_mbuf != NULL) {
   4402 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4403 			m_freem(rxs->rxs_mbuf);
   4404 			rxs->rxs_mbuf = NULL;
   4405 		}
   4406 	}
   4407 }
   4408 
   4409 
   4410 /*
   4411  * XXX copy from FreeBSD's sys/net/rss_config.c
   4412  */
   4413 /*
   4414  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4415  * effectiveness may be limited by algorithm choice and available entropy
   4416  * during the boot.
   4417  *
   4418  * XXXRW: And that we don't randomize it yet!
   4419  *
   4420  * This is the default Microsoft RSS specification key which is also
   4421  * the Chelsio T5 firmware default key.
   4422  */
   4423 #define RSS_KEYSIZE 40
   4424 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4425 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4426 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4427 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4428 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4429 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4430 };
   4431 
   4432 /*
   4433  * Caller must pass an array of size sizeof(rss_key).
   4434  *
   4435  * XXX
   4436  * As if_ixgbe may use this function, it should not be an
   4437  * if_wm-specific function.
   4438  */
   4439 static void
   4440 wm_rss_getkey(uint8_t *key)
   4441 {
   4442 
   4443 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4444 }
   4445 
   4446 /*
   4447  * Setup registers for RSS.
   4448  *
   4449  * XXX VMDq is not yet supported
   4450  */
   4451 static void
   4452 wm_init_rss(struct wm_softc *sc)
   4453 {
   4454 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4455 	int i;
   4456 
   4457 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4458 
   4459 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4460 		int qid, reta_ent;
   4461 
   4462 		qid  = i % sc->sc_nqueues;
   4463 		switch (sc->sc_type) {
   4464 		case WM_T_82574:
   4465 			reta_ent = __SHIFTIN(qid,
   4466 			    RETA_ENT_QINDEX_MASK_82574);
   4467 			break;
   4468 		case WM_T_82575:
   4469 			reta_ent = __SHIFTIN(qid,
   4470 			    RETA_ENT_QINDEX1_MASK_82575);
   4471 			break;
   4472 		default:
   4473 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4474 			break;
   4475 		}
   4476 
   4477 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4478 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4479 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4480 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4481 	}
   4482 
   4483 	wm_rss_getkey((uint8_t *)rss_key);
   4484 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4485 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4486 
   4487 	if (sc->sc_type == WM_T_82574)
   4488 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4489 	else
   4490 		mrqc = MRQC_ENABLE_RSS_MQ;
   4491 
   4492 	/*
   4493 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4494 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4495 	 */
   4496 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4497 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4498 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4499 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4500 
   4501 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4502 }
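
/*
 * Example of the redirection table fill above (illustrative): with
 * sc_nqueues == 4, the RETA_NUM_ENTRIES entries are written with the
 * repeating pattern 0,1,2,3,0,1,2,3,... so RSS hash values spread
 * incoming flows round-robin across the four RX queues.
 */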
   4503 
   4504 /*
   4505  * Adjust the TX and RX queue numbers which the system actually uses.
   4506  *
   4507  * The numbers are affected by the parameters below:
   4508  *     - The number of hardware queues
   4509  *     - The number of MSI-X vectors (= "nvectors" argument)
   4510  *     - ncpu
   4511  */
   4512 static void
   4513 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4514 {
   4515 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4516 
   4517 	if (nvectors < 2) {
   4518 		sc->sc_nqueues = 1;
   4519 		return;
   4520 	}
   4521 
   4522 	switch (sc->sc_type) {
   4523 	case WM_T_82572:
   4524 		hw_ntxqueues = 2;
   4525 		hw_nrxqueues = 2;
   4526 		break;
   4527 	case WM_T_82574:
   4528 		hw_ntxqueues = 2;
   4529 		hw_nrxqueues = 2;
   4530 		break;
   4531 	case WM_T_82575:
   4532 		hw_ntxqueues = 4;
   4533 		hw_nrxqueues = 4;
   4534 		break;
   4535 	case WM_T_82576:
   4536 		hw_ntxqueues = 16;
   4537 		hw_nrxqueues = 16;
   4538 		break;
   4539 	case WM_T_82580:
   4540 	case WM_T_I350:
   4541 	case WM_T_I354:
   4542 		hw_ntxqueues = 8;
   4543 		hw_nrxqueues = 8;
   4544 		break;
   4545 	case WM_T_I210:
   4546 		hw_ntxqueues = 4;
   4547 		hw_nrxqueues = 4;
   4548 		break;
   4549 	case WM_T_I211:
   4550 		hw_ntxqueues = 2;
   4551 		hw_nrxqueues = 2;
   4552 		break;
   4553 		/*
   4554 		 * As the ethernet controllers below do not support MSI-X,
   4555 		 * this driver does not use multiqueue on them.
   4556 		 *     - WM_T_80003
   4557 		 *     - WM_T_ICH8
   4558 		 *     - WM_T_ICH9
   4559 		 *     - WM_T_ICH10
   4560 		 *     - WM_T_PCH
   4561 		 *     - WM_T_PCH2
   4562 		 *     - WM_T_PCH_LPT
   4563 		 */
   4564 	default:
   4565 		hw_ntxqueues = 1;
   4566 		hw_nrxqueues = 1;
   4567 		break;
   4568 	}
   4569 
   4570 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4571 
   4572 	/*
   4573 	 * Since more queues than MSI-X vectors cannot improve scaling, limit
   4574 	 * the number of queues actually used.
   4575 	 */
   4576 	if (nvectors < hw_nqueues + 1) {
   4577 		sc->sc_nqueues = nvectors - 1;
   4578 	} else {
   4579 		sc->sc_nqueues = hw_nqueues;
   4580 	}
   4581 
   4582 	/*
   4583 	 * Since more queues than CPUs cannot improve scaling, limit
   4584 	 * the number of queues actually used.
   4585 	 */
   4586 	if (ncpu < sc->sc_nqueues)
   4587 		sc->sc_nqueues = ncpu;
   4588 }
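
/*
 * Worked example: on an 82576 (16 hardware queue pairs) with
 * nvectors == 5 and ncpu == 8, hw_nqueues is 16; the vector limit
 * reduces sc_nqueues to nvectors - 1 == 4 (one vector is presumably
 * reserved for the link interrupt), and the CPU limit leaves it at 4.
 */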
   4589 
   4590 static int
   4591 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4592 {
   4593 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4594 	wmq->wmq_id = qidx;
   4595 	wmq->wmq_intr_idx = intr_idx;
   4596 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4597 #ifdef WM_MPSAFE
   4598 	    | SOFTINT_MPSAFE
   4599 #endif
   4600 	    , wm_handle_queue, wmq);
   4601 	if (wmq->wmq_si != NULL)
   4602 		return 0;
   4603 
   4604 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4605 	    wmq->wmq_id);
   4606 
   4607 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4608 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4609 	return ENOMEM;
   4610 }
   4611 
   4612 /*
    4613  * Both single-interrupt MSI and INTx can use this function.
   4614  */
   4615 static int
   4616 wm_setup_legacy(struct wm_softc *sc)
   4617 {
   4618 	pci_chipset_tag_t pc = sc->sc_pc;
   4619 	const char *intrstr = NULL;
   4620 	char intrbuf[PCI_INTRSTR_LEN];
   4621 	int error;
   4622 
   4623 	error = wm_alloc_txrx_queues(sc);
   4624 	if (error) {
   4625 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4626 		    error);
   4627 		return ENOMEM;
   4628 	}
   4629 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4630 	    sizeof(intrbuf));
   4631 #ifdef WM_MPSAFE
   4632 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4633 #endif
   4634 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4635 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4636 	if (sc->sc_ihs[0] == NULL) {
    4637 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4638 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4639 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4640 		return ENOMEM;
   4641 	}
   4642 
   4643 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4644 	sc->sc_nintrs = 1;
   4645 
   4646 	return wm_softint_establish(sc, 0, 0);
   4647 }
   4648 
   4649 static int
   4650 wm_setup_msix(struct wm_softc *sc)
   4651 {
   4652 	void *vih;
   4653 	kcpuset_t *affinity;
   4654 	int qidx, error, intr_idx, txrx_established;
   4655 	pci_chipset_tag_t pc = sc->sc_pc;
   4656 	const char *intrstr = NULL;
   4657 	char intrbuf[PCI_INTRSTR_LEN];
   4658 	char intr_xname[INTRDEVNAMEBUF];
   4659 
   4660 	if (sc->sc_nqueues < ncpu) {
   4661 		/*
    4662 		 * To avoid contending with other devices' interrupts, the
    4663 		 * affinity of the Tx/Rx interrupts starts at CPU#1.
   4664 		 */
   4665 		sc->sc_affinity_offset = 1;
   4666 	} else {
   4667 		/*
    4668 		 * In this case, this device uses all CPUs, so for readability
    4669 		 * we make each affinity cpu_index equal to its MSI-X vector number.
   4670 		 */
   4671 		sc->sc_affinity_offset = 0;
   4672 	}
   4673 
   4674 	error = wm_alloc_txrx_queues(sc);
   4675 	if (error) {
   4676 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4677 		    error);
   4678 		return ENOMEM;
   4679 	}
   4680 
   4681 	kcpuset_create(&affinity, false);
   4682 	intr_idx = 0;
   4683 
   4684 	/*
   4685 	 * TX and RX
   4686 	 */
   4687 	txrx_established = 0;
   4688 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4689 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4690 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4691 
   4692 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4693 		    sizeof(intrbuf));
   4694 #ifdef WM_MPSAFE
   4695 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4696 		    PCI_INTR_MPSAFE, true);
   4697 #endif
   4698 		memset(intr_xname, 0, sizeof(intr_xname));
   4699 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4700 		    device_xname(sc->sc_dev), qidx);
   4701 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4702 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4703 		if (vih == NULL) {
   4704 			aprint_error_dev(sc->sc_dev,
   4705 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4706 			    intrstr ? " at " : "",
   4707 			    intrstr ? intrstr : "");
   4708 
   4709 			goto fail;
   4710 		}
   4711 		kcpuset_zero(affinity);
   4712 		/* Round-robin affinity */
   4713 		kcpuset_set(affinity, affinity_to);
   4714 		error = interrupt_distribute(vih, affinity, NULL);
   4715 		if (error == 0) {
   4716 			aprint_normal_dev(sc->sc_dev,
   4717 			    "for TX and RX interrupting at %s affinity to %u\n",
   4718 			    intrstr, affinity_to);
   4719 		} else {
   4720 			aprint_normal_dev(sc->sc_dev,
   4721 			    "for TX and RX interrupting at %s\n", intrstr);
   4722 		}
   4723 		sc->sc_ihs[intr_idx] = vih;
   4724 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4725 			goto fail;
   4726 		txrx_established++;
   4727 		intr_idx++;
   4728 	}
   4729 
   4730 	/*
   4731 	 * LINK
   4732 	 */
   4733 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4734 	    sizeof(intrbuf));
   4735 #ifdef WM_MPSAFE
   4736 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4737 #endif
   4738 	memset(intr_xname, 0, sizeof(intr_xname));
   4739 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4740 	    device_xname(sc->sc_dev));
   4741 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
    4742 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4743 	if (vih == NULL) {
   4744 		aprint_error_dev(sc->sc_dev,
   4745 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4746 		    intrstr ? " at " : "",
   4747 		    intrstr ? intrstr : "");
   4748 
   4749 		goto fail;
   4750 	}
    4751 	/* Keep the default affinity for the LINK interrupt. */
   4752 	aprint_normal_dev(sc->sc_dev,
   4753 	    "for LINK interrupting at %s\n", intrstr);
   4754 	sc->sc_ihs[intr_idx] = vih;
   4755 	sc->sc_link_intr_idx = intr_idx;
   4756 
   4757 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4758 	kcpuset_destroy(affinity);
   4759 	return 0;
   4760 
   4761  fail:
   4762 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4763 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4764 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4765 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4766 	}
   4767 
   4768 	kcpuset_destroy(affinity);
   4769 	return ENOMEM;
   4770 }
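
/*
 * Resulting vector layout (illustrative): with sc_nqueues = 4, MSI-X
 * vectors 0-3 service the TX/RX queue pairs and vector 4 services the
 * link interrupt.  With sc_affinity_offset = 1, the queue vectors are
 * pinned round-robin to CPU#1..CPU#4 (modulo ncpu), leaving CPU#0 to
 * other devices, while the link vector keeps the system default
 * affinity.
 */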
   4771 
   4772 static void
   4773 wm_turnon(struct wm_softc *sc)
   4774 {
   4775 	int i;
   4776 
   4777 	KASSERT(WM_CORE_LOCKED(sc));
   4778 
   4779 	/*
    4780 	 * Must unset the stopping flags in ascending order.
   4781 	 */
    4782 	for (i = 0; i < sc->sc_nqueues; i++) {
   4783 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4784 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4785 
   4786 		mutex_enter(txq->txq_lock);
   4787 		txq->txq_stopping = false;
   4788 		mutex_exit(txq->txq_lock);
   4789 
   4790 		mutex_enter(rxq->rxq_lock);
   4791 		rxq->rxq_stopping = false;
   4792 		mutex_exit(rxq->rxq_lock);
   4793 	}
   4794 
   4795 	sc->sc_core_stopping = false;
   4796 }
   4797 
   4798 static void
   4799 wm_turnoff(struct wm_softc *sc)
   4800 {
   4801 	int i;
   4802 
   4803 	KASSERT(WM_CORE_LOCKED(sc));
   4804 
   4805 	sc->sc_core_stopping = true;
   4806 
   4807 	/*
    4808 	 * Must set the stopping flags in ascending order.
   4809 	 */
    4810 	for (i = 0; i < sc->sc_nqueues; i++) {
   4811 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4812 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4813 
   4814 		mutex_enter(rxq->rxq_lock);
   4815 		rxq->rxq_stopping = true;
   4816 		mutex_exit(rxq->rxq_lock);
   4817 
   4818 		mutex_enter(txq->txq_lock);
   4819 		txq->txq_stopping = true;
   4820 		mutex_exit(txq->txq_lock);
   4821 	}
   4822 }
   4823 
   4824 /*
    4825  * Write the interrupt interval value to the ITR or EITR register.
   4826  */
   4827 static void
   4828 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4829 {
   4830 
   4831 	if (!wmq->wmq_set_itr)
   4832 		return;
   4833 
   4834 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4835 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4836 
   4837 		/*
    4838 		 * The 82575 doesn't have a CNT_INGR field, so overwrite
    4839 		 * the counter field in software.
   4840 		 */
   4841 		if (sc->sc_type == WM_T_82575)
   4842 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4843 		else
   4844 			eitr |= EITR_CNT_INGR;
   4845 
   4846 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4847 	} else if (sc->sc_type == WM_T_82574 && sc->sc_nintrs > 1) {
   4848 		/*
    4849 		 * The 82574 has both ITR and EITR. Set EITR when we use
    4850 		 * the multiqueue function with MSI-X.
   4851 		 */
   4852 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4853 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4854 	} else {
   4855 		KASSERT(wmq->wmq_id == 0);
   4856 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4857 	}
   4858 
   4859 	wmq->wmq_set_itr = false;
   4860 }
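
/*
 * Register-selection summary for wm_itrs_writereg() above
 * (illustrative): NEWQUEUE controllers use EITR(vector), with the
 * counter field maintained by software on the 82575; an 82574 running
 * multiqueue MSI-X uses EITR_82574(vector); everything else falls back
 * to the single shared ITR register, which is why the KASSERT there
 * insists on queue 0.
 */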
   4861 
   4862 /*
   4863  * TODO
    4864  * The dynamic calculation of itr below is almost the same as Linux's
    4865  * igb, but it does not fit wm(4). So, AIM is kept disabled until we
    4866  * find an appropriate itr calculation.
   4867  */
   4868 /*
    4869  * Calculate the interrupt interval value that wm_itrs_writereg() will
    4870  * write. This function itself does not write the ITR/EITR register.
   4871  */
   4872 static void
   4873 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4874 {
   4875 #ifdef NOTYET
   4876 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4877 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4878 	uint32_t avg_size = 0;
   4879 	uint32_t new_itr;
   4880 
   4881 	if (rxq->rxq_packets)
   4882 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   4883 	if (txq->txq_packets)
   4884 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4885 
   4886 	if (avg_size == 0) {
   4887 		new_itr = 450; /* restore default value */
   4888 		goto out;
   4889 	}
   4890 
   4891 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   4892 	avg_size += 24;
   4893 
   4894 	/* Don't starve jumbo frames */
   4895 	avg_size = min(avg_size, 3000);
   4896 
   4897 	/* Give a little boost to mid-size frames */
   4898 	if ((avg_size > 300) && (avg_size < 1200))
   4899 		new_itr = avg_size / 3;
   4900 	else
   4901 		new_itr = avg_size / 2;
   4902 
   4903 out:
   4904 	/*
    4905 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   4906 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   4907 	 */
   4908 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   4909 		new_itr *= 4;
   4910 
   4911 	if (new_itr != wmq->wmq_itr) {
   4912 		wmq->wmq_itr = new_itr;
   4913 		wmq->wmq_set_itr = true;
   4914 	} else
   4915 		wmq->wmq_set_itr = false;
   4916 
   4917 	rxq->rxq_packets = 0;
   4918 	rxq->rxq_bytes = 0;
   4919 	txq->txq_packets = 0;
   4920 	txq->txq_bytes = 0;
   4921 #endif
   4922 }
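
/*
 * Worked example of the disabled AIM heuristic above (illustrative
 * only): for an average frame of 576 bytes, avg_size becomes 600 after
 * the 24-byte CRC/preamble/gap adjustment; it falls into the mid-size
 * (300, 1200) band, so new_itr = 600 / 3 = 200, which is then scaled
 * to 200 * 4 = 800 on every controller except the 82575 before
 * wm_itrs_writereg() writes it.
 */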
   4923 
   4924 /*
   4925  * wm_init:		[ifnet interface function]
   4926  *
   4927  *	Initialize the interface.
   4928  */
   4929 static int
   4930 wm_init(struct ifnet *ifp)
   4931 {
   4932 	struct wm_softc *sc = ifp->if_softc;
   4933 	int ret;
   4934 
   4935 	WM_CORE_LOCK(sc);
   4936 	ret = wm_init_locked(ifp);
   4937 	WM_CORE_UNLOCK(sc);
   4938 
   4939 	return ret;
   4940 }
   4941 
   4942 static int
   4943 wm_init_locked(struct ifnet *ifp)
   4944 {
   4945 	struct wm_softc *sc = ifp->if_softc;
   4946 	int i, j, trynum, error = 0;
   4947 	uint32_t reg;
   4948 
   4949 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4950 		device_xname(sc->sc_dev), __func__));
   4951 	KASSERT(WM_CORE_LOCKED(sc));
   4952 
   4953 	/*
    4954 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4955 	 * There is a small but measurable benefit to avoiding the adjustment
   4956 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4957 	 * on such platforms.  One possibility is that the DMA itself is
   4958 	 * slightly more efficient if the front of the entire packet (instead
   4959 	 * of the front of the headers) is aligned.
   4960 	 *
   4961 	 * Note we must always set align_tweak to 0 if we are using
   4962 	 * jumbo frames.
   4963 	 */
   4964 #ifdef __NO_STRICT_ALIGNMENT
   4965 	sc->sc_align_tweak = 0;
   4966 #else
   4967 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4968 		sc->sc_align_tweak = 0;
   4969 	else
   4970 		sc->sc_align_tweak = 2;
   4971 #endif /* __NO_STRICT_ALIGNMENT */
   4972 
   4973 	/* Cancel any pending I/O. */
   4974 	wm_stop_locked(ifp, 0);
   4975 
   4976 	/* update statistics before reset */
   4977 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4978 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4979 
   4980 	/* PCH_SPT hardware workaround */
   4981 	if (sc->sc_type == WM_T_PCH_SPT)
   4982 		wm_flush_desc_rings(sc);
   4983 
   4984 	/* Reset the chip to a known state. */
   4985 	wm_reset(sc);
   4986 
   4987 	/* AMT based hardware can now take control from firmware */
   4988 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4989 		wm_get_hw_control(sc);
   4990 
   4991 	/* Init hardware bits */
   4992 	wm_initialize_hardware_bits(sc);
   4993 
   4994 	/* Reset the PHY. */
   4995 	if (sc->sc_flags & WM_F_HAS_MII)
   4996 		wm_gmii_reset(sc);
   4997 
   4998 	/* Calculate (E)ITR value */
   4999 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5000 		/*
   5001 		 * For NEWQUEUE's EITR (except for 82575).
    5002 		 * The 82575's EITR should be set to the same throttling value
    5003 		 * as other old controllers' ITR because the interrupts/sec
    5004 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5005 		 *
    5006 		 * The 82574's EITR should be set to the same throttling value as the ITR.
    5007 		 *
    5008 		 * For N interrupts/sec, set this value to
    5009 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5010 		 */
   5011 		sc->sc_itr_init = 450;
   5012 	} else if (sc->sc_type >= WM_T_82543) {
   5013 		/*
   5014 		 * Set up the interrupt throttling register (units of 256ns)
   5015 		 * Note that a footnote in Intel's documentation says this
   5016 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5017 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5018 		 * that that is also true for the 1024ns units of the other
   5019 		 * interrupt-related timer registers -- so, really, we ought
   5020 		 * to divide this value by 4 when the link speed is low.
   5021 		 *
   5022 		 * XXX implement this division at link speed change!
   5023 		 */
   5024 
   5025 		/*
   5026 		 * For N interrupts/sec, set this value to:
   5027 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5028 		 * absolute and packet timer values to this value
   5029 		 * divided by 4 to get "simple timer" behavior.
   5030 		 */
   5031 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5032 	}
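
	/*
	 * Worked numbers for the defaults above (illustrative only):
	 * NEWQUEUE EITR 450 gives 1,000,000 / 450 ~= 2222 interrupts/sec;
	 * legacy ITR 1500 gives 1,000,000,000 / (1500 * 256) ~= 2604
	 * interrupts/sec, matching the comment on sc_itr_init.
	 */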
   5033 
   5034 	error = wm_init_txrx_queues(sc);
   5035 	if (error)
   5036 		goto out;
   5037 
   5038 	/*
   5039 	 * Clear out the VLAN table -- we don't use it (yet).
   5040 	 */
   5041 	CSR_WRITE(sc, WMREG_VET, 0);
   5042 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5043 		trynum = 10; /* Due to hw errata */
   5044 	else
   5045 		trynum = 1;
   5046 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5047 		for (j = 0; j < trynum; j++)
   5048 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5049 
   5050 	/*
   5051 	 * Set up flow-control parameters.
   5052 	 *
   5053 	 * XXX Values could probably stand some tuning.
   5054 	 */
   5055 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5056 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5057 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5058 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5059 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5060 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5061 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5062 	}
   5063 
   5064 	sc->sc_fcrtl = FCRTL_DFLT;
   5065 	if (sc->sc_type < WM_T_82543) {
   5066 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5067 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5068 	} else {
   5069 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5070 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5071 	}
   5072 
   5073 	if (sc->sc_type == WM_T_80003)
   5074 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5075 	else
   5076 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5077 
   5078 	/* Writes the control register. */
   5079 	wm_set_vlan(sc);
   5080 
   5081 	if (sc->sc_flags & WM_F_HAS_MII) {
   5082 		int val;
   5083 
   5084 		switch (sc->sc_type) {
   5085 		case WM_T_80003:
   5086 		case WM_T_ICH8:
   5087 		case WM_T_ICH9:
   5088 		case WM_T_ICH10:
   5089 		case WM_T_PCH:
   5090 		case WM_T_PCH2:
   5091 		case WM_T_PCH_LPT:
   5092 		case WM_T_PCH_SPT:
   5093 			/*
   5094 			 * Set the mac to wait the maximum time between each
   5095 			 * iteration and increase the max iterations when
   5096 			 * polling the phy; this fixes erroneous timeouts at
   5097 			 * 10Mbps.
   5098 			 */
   5099 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5100 			    0xFFFF);
   5101 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5102 			val |= 0x3F;
   5103 			wm_kmrn_writereg(sc,
   5104 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5105 			break;
   5106 		default:
   5107 			break;
   5108 		}
   5109 
   5110 		if (sc->sc_type == WM_T_80003) {
   5111 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5112 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5113 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5114 
   5115 			/* Bypass RX and TX FIFO's */
   5116 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5117 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5118 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5119 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5120 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5121 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5122 		}
   5123 	}
   5124 #if 0
   5125 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5126 #endif
   5127 
   5128 	/* Set up checksum offload parameters. */
   5129 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5130 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5131 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5132 		reg |= RXCSUM_IPOFL;
   5133 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5134 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5135 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5136 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5137 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5138 
   5139 	/* Set up MSI-X */
   5140 	if (sc->sc_nintrs > 1) {
   5141 		uint32_t ivar;
   5142 		struct wm_queue *wmq;
   5143 		int qid, qintr_idx;
   5144 
   5145 		if (sc->sc_type == WM_T_82575) {
   5146 			/* Interrupt control */
   5147 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5148 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5149 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5150 
   5151 			/* TX and RX */
   5152 			for (i = 0; i < sc->sc_nqueues; i++) {
   5153 				wmq = &sc->sc_queue[i];
   5154 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5155 				    EITR_TX_QUEUE(wmq->wmq_id)
   5156 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5157 			}
   5158 			/* Link status */
   5159 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5160 			    EITR_OTHER);
   5161 		} else if (sc->sc_type == WM_T_82574) {
   5162 			/* Interrupt control */
   5163 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5164 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5165 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5166 
   5167 			/*
    5168 			 * Work around an issue with spurious interrupts
    5169 			 * in MSI-X mode.
    5170 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5171 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5172 			 */
   5173 			reg = CSR_READ(sc, WMREG_RFCTL);
   5174 			reg |= WMREG_RFCTL_ACKDIS;
   5175 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5176 
   5177 			ivar = 0;
   5178 			/* TX and RX */
   5179 			for (i = 0; i < sc->sc_nqueues; i++) {
   5180 				wmq = &sc->sc_queue[i];
   5181 				qid = wmq->wmq_id;
   5182 				qintr_idx = wmq->wmq_intr_idx;
   5183 
   5184 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5185 				    IVAR_TX_MASK_Q_82574(qid));
   5186 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5187 				    IVAR_RX_MASK_Q_82574(qid));
   5188 			}
   5189 			/* Link status */
   5190 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5191 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5192 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5193 		} else {
   5194 			/* Interrupt control */
   5195 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5196 			    | GPIE_EIAME | GPIE_PBA);
   5197 
   5198 			switch (sc->sc_type) {
   5199 			case WM_T_82580:
   5200 			case WM_T_I350:
   5201 			case WM_T_I354:
   5202 			case WM_T_I210:
   5203 			case WM_T_I211:
   5204 				/* TX and RX */
   5205 				for (i = 0; i < sc->sc_nqueues; i++) {
   5206 					wmq = &sc->sc_queue[i];
   5207 					qid = wmq->wmq_id;
   5208 					qintr_idx = wmq->wmq_intr_idx;
   5209 
   5210 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5211 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5212 					ivar |= __SHIFTIN((qintr_idx
   5213 						| IVAR_VALID),
   5214 					    IVAR_TX_MASK_Q(qid));
   5215 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5216 					ivar |= __SHIFTIN((qintr_idx
   5217 						| IVAR_VALID),
   5218 					    IVAR_RX_MASK_Q(qid));
   5219 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5220 				}
   5221 				break;
   5222 			case WM_T_82576:
   5223 				/* TX and RX */
   5224 				for (i = 0; i < sc->sc_nqueues; i++) {
   5225 					wmq = &sc->sc_queue[i];
   5226 					qid = wmq->wmq_id;
   5227 					qintr_idx = wmq->wmq_intr_idx;
   5228 
   5229 					ivar = CSR_READ(sc,
   5230 					    WMREG_IVAR_Q_82576(qid));
   5231 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5232 					ivar |= __SHIFTIN((qintr_idx
   5233 						| IVAR_VALID),
   5234 					    IVAR_TX_MASK_Q_82576(qid));
   5235 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5236 					ivar |= __SHIFTIN((qintr_idx
   5237 						| IVAR_VALID),
   5238 					    IVAR_RX_MASK_Q_82576(qid));
   5239 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5240 					    ivar);
   5241 				}
   5242 				break;
   5243 			default:
   5244 				break;
   5245 			}
   5246 
   5247 			/* Link status */
   5248 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5249 			    IVAR_MISC_OTHER);
   5250 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5251 		}
   5252 
   5253 		if (sc->sc_nqueues > 1) {
   5254 			wm_init_rss(sc);
   5255 
   5256 			/*
    5257 			 * NOTE: Receive Full-Packet Checksum Offload
    5258 			 * is mutually exclusive with Multiqueue. However,
    5259 			 * this is not the same as TCP/IP checksums, which
    5260 			 * still work.
    5261 			 */
   5262 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5263 			reg |= RXCSUM_PCSD;
   5264 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5265 		}
   5266 	}
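
	/*
	 * Illustrative IVAR outcome of the block above: on an 82580-family
	 * controller with sc_nqueues = 2, IVAR_Q(0) routes TX0/RX0 to
	 * vector 0 and IVAR_Q(1) routes TX1/RX1 to vector 1, each entry
	 * tagged with IVAR_VALID, while IVAR_MISC routes link events to
	 * sc_link_intr_idx (vector 2 in this layout).
	 */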
   5267 
   5268 	/* Set up the interrupt registers. */
   5269 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5270 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5271 	    ICR_RXO | ICR_RXT0;
   5272 	if (sc->sc_nintrs > 1) {
   5273 		uint32_t mask;
   5274 		struct wm_queue *wmq;
   5275 
   5276 		switch (sc->sc_type) {
   5277 		case WM_T_82574:
   5278 			mask = 0;
   5279 			for (i = 0; i < sc->sc_nqueues; i++) {
   5280 				wmq = &sc->sc_queue[i];
   5281 				mask |= ICR_TXQ(wmq->wmq_id);
   5282 				mask |= ICR_RXQ(wmq->wmq_id);
   5283 			}
   5284 			mask |= ICR_OTHER;
   5285 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5286 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5287 			break;
   5288 		default:
   5289 			if (sc->sc_type == WM_T_82575) {
   5290 				mask = 0;
   5291 				for (i = 0; i < sc->sc_nqueues; i++) {
   5292 					wmq = &sc->sc_queue[i];
   5293 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5294 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5295 				}
   5296 				mask |= EITR_OTHER;
   5297 			} else {
   5298 				mask = 0;
   5299 				for (i = 0; i < sc->sc_nqueues; i++) {
   5300 					wmq = &sc->sc_queue[i];
   5301 					mask |= 1 << wmq->wmq_intr_idx;
   5302 				}
   5303 				mask |= 1 << sc->sc_link_intr_idx;
   5304 			}
   5305 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5306 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5307 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5308 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5309 			break;
   5310 		}
   5311 	} else
   5312 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5313 
   5314 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5315 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5316 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5317 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5318 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5319 		reg |= KABGTXD_BGSQLBIAS;
   5320 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5321 	}
   5322 
   5323 	/* Set up the inter-packet gap. */
   5324 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5325 
   5326 	if (sc->sc_type >= WM_T_82543) {
   5327 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5328 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5329 			wm_itrs_writereg(sc, wmq);
   5330 		}
   5331 		/*
    5332 		 * Link interrupts occur much less frequently than TX
    5333 		 * and RX interrupts. So, we don't tune the
    5334 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5335 		 * FreeBSD's if_igb does.
   5336 		 */
   5337 	}
   5338 
   5339 	/* Set the VLAN ethernetype. */
   5340 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5341 
   5342 	/*
   5343 	 * Set up the transmit control register; we start out with
    5344 	 * a collision distance suitable for FDX, but update it when
   5345 	 * we resolve the media type.
   5346 	 */
   5347 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5348 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5349 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5350 	if (sc->sc_type >= WM_T_82571)
   5351 		sc->sc_tctl |= TCTL_MULR;
   5352 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5353 
   5354 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5355 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5356 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5357 	}
   5358 
   5359 	if (sc->sc_type == WM_T_80003) {
   5360 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5361 		reg &= ~TCTL_EXT_GCEX_MASK;
   5362 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5363 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5364 	}
   5365 
   5366 	/* Set the media. */
   5367 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5368 		goto out;
   5369 
   5370 	/* Configure for OS presence */
   5371 	wm_init_manageability(sc);
   5372 
   5373 	/*
   5374 	 * Set up the receive control register; we actually program
   5375 	 * the register when we set the receive filter.  Use multicast
   5376 	 * address offset type 0.
   5377 	 *
   5378 	 * Only the i82544 has the ability to strip the incoming
   5379 	 * CRC, so we don't enable that feature.
   5380 	 */
   5381 	sc->sc_mchash_type = 0;
   5382 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5383 	    | RCTL_MO(sc->sc_mchash_type);
   5384 
   5385 	/*
    5386 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5387 	 */
   5388 	if (sc->sc_type == WM_T_82574)
   5389 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5390 
   5391 	/*
   5392 	 * The I350 has a bug where it always strips the CRC whether
    5393 	 * asked to or not. So ask for stripped CRC here and cope with it in rxeof.
   5394 	 */
   5395 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5396 	    || (sc->sc_type == WM_T_I210))
   5397 		sc->sc_rctl |= RCTL_SECRC;
   5398 
   5399 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5400 	    && (ifp->if_mtu > ETHERMTU)) {
   5401 		sc->sc_rctl |= RCTL_LPE;
   5402 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5403 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5404 	}
   5405 
   5406 	if (MCLBYTES == 2048) {
   5407 		sc->sc_rctl |= RCTL_2k;
   5408 	} else {
   5409 		if (sc->sc_type >= WM_T_82543) {
   5410 			switch (MCLBYTES) {
   5411 			case 4096:
   5412 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5413 				break;
   5414 			case 8192:
   5415 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5416 				break;
   5417 			case 16384:
   5418 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5419 				break;
   5420 			default:
   5421 				panic("wm_init: MCLBYTES %d unsupported",
   5422 				    MCLBYTES);
   5423 				break;
   5424 			}
   5425 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5426 	}
   5427 
   5428 	/* Set the receive filter. */
   5429 	wm_set_filter(sc);
   5430 
   5431 	/* Enable ECC */
   5432 	switch (sc->sc_type) {
   5433 	case WM_T_82571:
   5434 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5435 		reg |= PBA_ECC_CORR_EN;
   5436 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5437 		break;
   5438 	case WM_T_PCH_LPT:
   5439 	case WM_T_PCH_SPT:
   5440 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5441 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5442 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5443 
   5444 		sc->sc_ctrl |= CTRL_MEHE;
   5445 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5446 		break;
   5447 	default:
   5448 		break;
   5449 	}
   5450 
   5451 	/* On 575 and later set RDT only if RX enabled */
   5452 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5453 		int qidx;
   5454 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5455 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5456 			for (i = 0; i < WM_NRXDESC; i++) {
   5457 				mutex_enter(rxq->rxq_lock);
   5458 				wm_init_rxdesc(rxq, i);
   5459 				mutex_exit(rxq->rxq_lock);
   5461 			}
   5462 		}
   5463 	}
   5464 
   5465 	wm_turnon(sc);
   5466 
   5467 	/* Start the one second link check clock. */
   5468 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5469 
   5470 	/* ...all done! */
   5471 	ifp->if_flags |= IFF_RUNNING;
   5472 	ifp->if_flags &= ~IFF_OACTIVE;
   5473 
   5474  out:
   5475 	sc->sc_if_flags = ifp->if_flags;
   5476 	if (error)
   5477 		log(LOG_ERR, "%s: interface not running\n",
   5478 		    device_xname(sc->sc_dev));
   5479 	return error;
   5480 }
   5481 
   5482 /*
   5483  * wm_stop:		[ifnet interface function]
   5484  *
   5485  *	Stop transmission on the interface.
   5486  */
   5487 static void
   5488 wm_stop(struct ifnet *ifp, int disable)
   5489 {
   5490 	struct wm_softc *sc = ifp->if_softc;
   5491 
   5492 	WM_CORE_LOCK(sc);
   5493 	wm_stop_locked(ifp, disable);
   5494 	WM_CORE_UNLOCK(sc);
   5495 }
   5496 
   5497 static void
   5498 wm_stop_locked(struct ifnet *ifp, int disable)
   5499 {
   5500 	struct wm_softc *sc = ifp->if_softc;
   5501 	struct wm_txsoft *txs;
   5502 	int i, qidx;
   5503 
   5504 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5505 		device_xname(sc->sc_dev), __func__));
   5506 	KASSERT(WM_CORE_LOCKED(sc));
   5507 
   5508 	wm_turnoff(sc);
   5509 
   5510 	/* Stop the one second clock. */
   5511 	callout_stop(&sc->sc_tick_ch);
   5512 
   5513 	/* Stop the 82547 Tx FIFO stall check timer. */
   5514 	if (sc->sc_type == WM_T_82547)
   5515 		callout_stop(&sc->sc_txfifo_ch);
   5516 
   5517 	if (sc->sc_flags & WM_F_HAS_MII) {
   5518 		/* Down the MII. */
   5519 		mii_down(&sc->sc_mii);
   5520 	} else {
   5521 #if 0
   5522 		/* Should we clear PHY's status properly? */
   5523 		wm_reset(sc);
   5524 #endif
   5525 	}
   5526 
   5527 	/* Stop the transmit and receive processes. */
   5528 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5529 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5530 	sc->sc_rctl &= ~RCTL_EN;
   5531 
   5532 	/*
   5533 	 * Clear the interrupt mask to ensure the device cannot assert its
   5534 	 * interrupt line.
   5535 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5536 	 * service any currently pending or shared interrupt.
   5537 	 */
   5538 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5539 	sc->sc_icr = 0;
   5540 	if (sc->sc_nintrs > 1) {
   5541 		if (sc->sc_type != WM_T_82574) {
   5542 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5543 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5544 		} else
   5545 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5546 	}
   5547 
   5548 	/* Release any queued transmit buffers. */
   5549 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5550 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5551 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5552 		mutex_enter(txq->txq_lock);
   5553 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5554 			txs = &txq->txq_soft[i];
   5555 			if (txs->txs_mbuf != NULL) {
    5556 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5557 				m_freem(txs->txs_mbuf);
   5558 				txs->txs_mbuf = NULL;
   5559 			}
   5560 		}
   5561 		mutex_exit(txq->txq_lock);
   5562 	}
   5563 
   5564 	/* Mark the interface as down and cancel the watchdog timer. */
   5565 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5566 	ifp->if_timer = 0;
   5567 
   5568 	if (disable) {
   5569 		for (i = 0; i < sc->sc_nqueues; i++) {
   5570 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5571 			mutex_enter(rxq->rxq_lock);
   5572 			wm_rxdrain(rxq);
   5573 			mutex_exit(rxq->rxq_lock);
   5574 		}
   5575 	}
   5576 
   5577 #if 0 /* notyet */
   5578 	if (sc->sc_type >= WM_T_82544)
   5579 		CSR_WRITE(sc, WMREG_WUC, 0);
   5580 #endif
   5581 }
   5582 
   5583 static void
   5584 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5585 {
   5586 	struct mbuf *m;
   5587 	int i;
   5588 
   5589 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5590 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5591 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5592 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5593 		    m->m_data, m->m_len, m->m_flags);
   5594 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5595 	    i, i == 1 ? "" : "s");
   5596 }
   5597 
   5598 /*
   5599  * wm_82547_txfifo_stall:
   5600  *
   5601  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5602  *	reset the FIFO pointers, and restart packet transmission.
   5603  */
   5604 static void
   5605 wm_82547_txfifo_stall(void *arg)
   5606 {
   5607 	struct wm_softc *sc = arg;
   5608 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5609 
   5610 	mutex_enter(txq->txq_lock);
   5611 
   5612 	if (txq->txq_stopping)
   5613 		goto out;
   5614 
   5615 	if (txq->txq_fifo_stall) {
   5616 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5617 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5618 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5619 			/*
   5620 			 * Packets have drained.  Stop transmitter, reset
   5621 			 * FIFO pointers, restart transmitter, and kick
   5622 			 * the packet queue.
   5623 			 */
   5624 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5625 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5626 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5627 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5628 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5629 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5630 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5631 			CSR_WRITE_FLUSH(sc);
   5632 
   5633 			txq->txq_fifo_head = 0;
   5634 			txq->txq_fifo_stall = 0;
   5635 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5636 		} else {
   5637 			/*
   5638 			 * Still waiting for packets to drain; try again in
   5639 			 * another tick.
   5640 			 */
   5641 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5642 		}
   5643 	}
   5644 
   5645 out:
   5646 	mutex_exit(txq->txq_lock);
   5647 }
   5648 
   5649 /*
   5650  * wm_82547_txfifo_bugchk:
   5651  *
    5652  *	Check for a bug condition in the 82547 Tx FIFO.  We need to
    5653  *	prevent enqueueing a packet that would wrap around the end
    5654  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5655  *
   5656  *	We do this by checking the amount of space before the end
   5657  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5658  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5659  *	the internal FIFO pointers to the beginning, and restart
   5660  *	transmission on the interface.
   5661  */
   5662 #define	WM_FIFO_HDR		0x10
   5663 #define	WM_82547_PAD_LEN	0x3e0
   5664 static int
   5665 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5666 {
   5667 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5668 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5669 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5670 
   5671 	/* Just return if already stalled. */
   5672 	if (txq->txq_fifo_stall)
   5673 		return 1;
   5674 
   5675 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5676 		/* Stall only occurs in half-duplex mode. */
   5677 		goto send_packet;
   5678 	}
   5679 
   5680 	if (len >= WM_82547_PAD_LEN + space) {
   5681 		txq->txq_fifo_stall = 1;
   5682 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5683 		return 1;
   5684 	}
   5685 
   5686  send_packet:
   5687 	txq->txq_fifo_head += len;
   5688 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5689 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5690 
   5691 	return 0;
   5692 }
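
/*
 * Worked example for wm_82547_txfifo_bugchk() in half-duplex mode
 * (illustrative only): with txq_fifo_size = 0x2000, txq_fifo_head =
 * 0x1f00 and a 256-byte packet, len rounds up to 0x110 and space is
 * 0x100; since 0x110 < WM_82547_PAD_LEN + 0x100 (= 0x4e0), the packet
 * is sent and the head wraps to 0x2010 - 0x2000 = 0x10.  A 1600-byte
 * packet in the same state (len = 0x650 >= 0x4e0) would instead stall
 * the FIFO.
 */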
   5693 
   5694 static int
   5695 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5696 {
   5697 	int error;
   5698 
   5699 	/*
   5700 	 * Allocate the control data structures, and create and load the
   5701 	 * DMA map for it.
   5702 	 *
   5703 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5704 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5705 	 * both sets within the same 4G segment.
   5706 	 */
   5707 	if (sc->sc_type < WM_T_82544)
   5708 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5709 	else
   5710 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5711 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5712 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5713 	else
   5714 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5715 
   5716 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5717 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5718 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5719 		aprint_error_dev(sc->sc_dev,
   5720 		    "unable to allocate TX control data, error = %d\n",
   5721 		    error);
   5722 		goto fail_0;
   5723 	}
   5724 
   5725 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5726 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5727 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5728 		aprint_error_dev(sc->sc_dev,
   5729 		    "unable to map TX control data, error = %d\n", error);
   5730 		goto fail_1;
   5731 	}
   5732 
   5733 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5734 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5735 		aprint_error_dev(sc->sc_dev,
   5736 		    "unable to create TX control data DMA map, error = %d\n",
   5737 		    error);
   5738 		goto fail_2;
   5739 	}
   5740 
   5741 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5742 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5743 		aprint_error_dev(sc->sc_dev,
   5744 		    "unable to load TX control data DMA map, error = %d\n",
   5745 		    error);
   5746 		goto fail_3;
   5747 	}
   5748 
   5749 	return 0;
   5750 
   5751  fail_3:
   5752 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5753  fail_2:
   5754 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5755 	    WM_TXDESCS_SIZE(txq));
   5756  fail_1:
   5757 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5758  fail_0:
   5759 	return error;
   5760 }
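
/*
 * Note on the bus_dmamem_alloc() call above (illustrative): the
 * 0x100000000ULL boundary argument keeps each descriptor ring inside a
 * single 4 GiB region, so the high 32 bits of every descriptor address
 * are identical and one TDBAH/TDBAL (or RDBAH/RDBAL) base pair can
 * address the whole ring.
 */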
   5761 
   5762 static void
   5763 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5764 {
   5765 
   5766 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5767 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5768 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5769 	    WM_TXDESCS_SIZE(txq));
   5770 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5771 }
   5772 
   5773 static int
   5774 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5775 {
   5776 	int error;
   5777 	size_t rxq_descs_size;
   5778 
   5779 	/*
   5780 	 * Allocate the control data structures, and create and load the
   5781 	 * DMA map for it.
   5782 	 *
   5783 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5784 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5785 	 * both sets within the same 4G segment.
   5786 	 */
   5787 	rxq->rxq_ndesc = WM_NRXDESC;
   5788 	if (sc->sc_type == WM_T_82574)
   5789 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5790 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5791 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5792 	else
   5793 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5794 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5795 
   5796 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5797 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5798 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5799 		aprint_error_dev(sc->sc_dev,
   5800 		    "unable to allocate RX control data, error = %d\n",
   5801 		    error);
   5802 		goto fail_0;
   5803 	}
   5804 
   5805 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5806 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5807 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5808 		aprint_error_dev(sc->sc_dev,
   5809 		    "unable to map RX control data, error = %d\n", error);
   5810 		goto fail_1;
   5811 	}
   5812 
   5813 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5814 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5815 		aprint_error_dev(sc->sc_dev,
   5816 		    "unable to create RX control data DMA map, error = %d\n",
   5817 		    error);
   5818 		goto fail_2;
   5819 	}
   5820 
   5821 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5822 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5823 		aprint_error_dev(sc->sc_dev,
   5824 		    "unable to load RX control data DMA map, error = %d\n",
   5825 		    error);
   5826 		goto fail_3;
   5827 	}
   5828 
   5829 	return 0;
   5830 
   5831  fail_3:
   5832 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5833  fail_2:
   5834 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5835 	    rxq_descs_size);
   5836  fail_1:
   5837 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5838  fail_0:
   5839 	return error;
   5840 }
   5841 
   5842 static void
   5843 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5844 {
   5845 
   5846 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5847 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5848 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5849 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5850 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5851 }
   5852 
   5853 
   5854 static int
   5855 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5856 {
   5857 	int i, error;
   5858 
   5859 	/* Create the transmit buffer DMA maps. */
   5860 	WM_TXQUEUELEN(txq) =
   5861 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5862 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5863 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5864 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5865 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5866 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5867 			aprint_error_dev(sc->sc_dev,
   5868 			    "unable to create Tx DMA map %d, error = %d\n",
   5869 			    i, error);
   5870 			goto fail;
   5871 		}
   5872 	}
   5873 
   5874 	return 0;
   5875 
   5876  fail:
   5877 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5878 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5879 			bus_dmamap_destroy(sc->sc_dmat,
   5880 			    txq->txq_soft[i].txs_dmamap);
   5881 	}
   5882 	return error;
   5883 }
   5884 
   5885 static void
   5886 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5887 {
   5888 	int i;
   5889 
   5890 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5891 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5892 			bus_dmamap_destroy(sc->sc_dmat,
   5893 			    txq->txq_soft[i].txs_dmamap);
   5894 	}
   5895 }
   5896 
   5897 static int
   5898 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5899 {
   5900 	int i, error;
   5901 
   5902 	/* Create the receive buffer DMA maps. */
   5903 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5904 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5905 			    MCLBYTES, 0, 0,
   5906 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5907 			aprint_error_dev(sc->sc_dev,
    5908 			    "unable to create Rx DMA map %d, error = %d\n",
   5909 			    i, error);
   5910 			goto fail;
   5911 		}
   5912 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5913 	}
   5914 
   5915 	return 0;
   5916 
   5917  fail:
   5918 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5919 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5920 			bus_dmamap_destroy(sc->sc_dmat,
   5921 			    rxq->rxq_soft[i].rxs_dmamap);
   5922 	}
   5923 	return error;
   5924 }
   5925 
   5926 static void
   5927 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5928 {
   5929 	int i;
   5930 
   5931 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5932 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5933 			bus_dmamap_destroy(sc->sc_dmat,
   5934 			    rxq->rxq_soft[i].rxs_dmamap);
   5935 	}
   5936 }
   5937 
   5938 /*
    5939  * wm_alloc_txrx_queues:
   5940  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5941  */
   5942 static int
   5943 wm_alloc_txrx_queues(struct wm_softc *sc)
   5944 {
   5945 	int i, error, tx_done, rx_done;
   5946 
   5947 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5948 	    KM_SLEEP);
   5949 	if (sc->sc_queue == NULL) {
    5950 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5951 		error = ENOMEM;
   5952 		goto fail_0;
   5953 	}
   5954 
   5955 	/*
   5956 	 * For transmission
   5957 	 */
   5958 	error = 0;
   5959 	tx_done = 0;
   5960 	for (i = 0; i < sc->sc_nqueues; i++) {
   5961 #ifdef WM_EVENT_COUNTERS
   5962 		int j;
   5963 		const char *xname;
   5964 #endif
   5965 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5966 		txq->txq_sc = sc;
   5967 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5968 
   5969 		error = wm_alloc_tx_descs(sc, txq);
   5970 		if (error)
   5971 			break;
   5972 		error = wm_alloc_tx_buffer(sc, txq);
   5973 		if (error) {
   5974 			wm_free_tx_descs(sc, txq);
   5975 			break;
   5976 		}
   5977 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5978 		if (txq->txq_interq == NULL) {
   5979 			wm_free_tx_descs(sc, txq);
   5980 			wm_free_tx_buffer(sc, txq);
   5981 			error = ENOMEM;
   5982 			break;
   5983 		}
   5984 
   5985 #ifdef WM_EVENT_COUNTERS
   5986 		xname = device_xname(sc->sc_dev);
   5987 
   5988 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5989 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5990 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5991 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5992 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5993 
   5994 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5995 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5996 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5997 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5998 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5999 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6000 
   6001 		for (j = 0; j < WM_NTXSEGS; j++) {
   6002 			snprintf(txq->txq_txseg_evcnt_names[j],
   6003 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6004 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6005 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6006 		}
   6007 
   6008 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6009 
   6010 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6011 #endif /* WM_EVENT_COUNTERS */
   6012 
   6013 		tx_done++;
   6014 	}
   6015 	if (error)
   6016 		goto fail_1;
   6017 
   6018 	/*
    6019 	 * For receive
   6020 	 */
   6021 	error = 0;
   6022 	rx_done = 0;
   6023 	for (i = 0; i < sc->sc_nqueues; i++) {
   6024 #ifdef WM_EVENT_COUNTERS
   6025 		const char *xname;
   6026 #endif
   6027 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6028 		rxq->rxq_sc = sc;
   6029 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6030 
   6031 		error = wm_alloc_rx_descs(sc, rxq);
   6032 		if (error)
   6033 			break;
   6034 
   6035 		error = wm_alloc_rx_buffer(sc, rxq);
   6036 		if (error) {
   6037 			wm_free_rx_descs(sc, rxq);
   6038 			break;
   6039 		}
   6040 
   6041 #ifdef WM_EVENT_COUNTERS
   6042 		xname = device_xname(sc->sc_dev);
   6043 
   6044 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6045 
   6046 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6047 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6048 #endif /* WM_EVENT_COUNTERS */
   6049 
   6050 		rx_done++;
   6051 	}
   6052 	if (error)
   6053 		goto fail_2;
   6054 
   6055 	return 0;
   6056 
   6057  fail_2:
   6058 	for (i = 0; i < rx_done; i++) {
   6059 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6060 		wm_free_rx_buffer(sc, rxq);
   6061 		wm_free_rx_descs(sc, rxq);
   6062 		if (rxq->rxq_lock)
   6063 			mutex_obj_free(rxq->rxq_lock);
   6064 	}
   6065  fail_1:
   6066 	for (i = 0; i < tx_done; i++) {
   6067 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6068 		pcq_destroy(txq->txq_interq);
   6069 		wm_free_tx_buffer(sc, txq);
   6070 		wm_free_tx_descs(sc, txq);
   6071 		if (txq->txq_lock)
   6072 			mutex_obj_free(txq->txq_lock);
   6073 	}
   6074 
   6075 	kmem_free(sc->sc_queue,
   6076 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6077  fail_0:
   6078 	return error;
   6079 }
   6080 
   6081 /*
   6082  * wm_free_quques:
    6083  * wm_free_txrx_queues:
   6084  */
   6085 static void
   6086 wm_free_txrx_queues(struct wm_softc *sc)
   6087 {
   6088 	int i;
   6089 
   6090 	for (i = 0; i < sc->sc_nqueues; i++) {
   6091 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6092 
   6093 #ifdef WM_EVENT_COUNTERS
   6094 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6095 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6096 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6097 #endif /* WM_EVENT_COUNTERS */
   6098 
   6099 		wm_free_rx_buffer(sc, rxq);
   6100 		wm_free_rx_descs(sc, rxq);
   6101 		if (rxq->rxq_lock)
   6102 			mutex_obj_free(rxq->rxq_lock);
   6103 	}
   6104 
   6105 	for (i = 0; i < sc->sc_nqueues; i++) {
   6106 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6107 		struct mbuf *m;
   6108 #ifdef WM_EVENT_COUNTERS
   6109 		int j;
   6110 
   6111 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6112 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6113 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6114 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6115 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6116 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6117 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6118 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6119 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6120 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6121 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6122 
   6123 		for (j = 0; j < WM_NTXSEGS; j++)
   6124 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6125 
   6126 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6127 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6128 #endif /* WM_EVENT_COUNTERS */
   6129 
   6130 		/* drain txq_interq */
   6131 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6132 			m_freem(m);
   6133 		pcq_destroy(txq->txq_interq);
   6134 
   6135 		wm_free_tx_buffer(sc, txq);
   6136 		wm_free_tx_descs(sc, txq);
   6137 		if (txq->txq_lock)
   6138 			mutex_obj_free(txq->txq_lock);
   6139 	}
   6140 
   6141 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6142 }
   6143 
   6144 static void
   6145 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6146 {
   6147 
   6148 	KASSERT(mutex_owned(txq->txq_lock));
   6149 
   6150 	/* Initialize the transmit descriptor ring. */
   6151 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6152 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6153 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6154 	txq->txq_free = WM_NTXDESC(txq);
   6155 	txq->txq_next = 0;
   6156 }
   6157 
   6158 static void
   6159 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6160     struct wm_txqueue *txq)
   6161 {
   6162 
   6163 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6164 		device_xname(sc->sc_dev), __func__));
   6165 	KASSERT(mutex_owned(txq->txq_lock));
   6166 
   6167 	if (sc->sc_type < WM_T_82543) {
   6168 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6169 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6170 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6171 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6172 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6173 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6174 	} else {
   6175 		int qid = wmq->wmq_id;
   6176 
   6177 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6178 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6179 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6180 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6181 
   6182 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6183 			/*
   6184 			 * Don't write TDT before TCTL.EN is set.
    6185 			 * See the documentation.
   6186 			 */
   6187 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6188 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6189 			    | TXDCTL_WTHRESH(0));
   6190 		else {
   6191 			/* XXX should update with AIM? */
   6192 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6193 			if (sc->sc_type >= WM_T_82540) {
    6194 				/* Should be the same value as TIDV. */
   6195 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6196 			}
   6197 
   6198 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6199 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6200 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6201 		}
   6202 	}
   6203 }
   6204 
   6205 static void
   6206 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6207 {
   6208 	int i;
   6209 
   6210 	KASSERT(mutex_owned(txq->txq_lock));
   6211 
   6212 	/* Initialize the transmit job descriptors. */
   6213 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6214 		txq->txq_soft[i].txs_mbuf = NULL;
   6215 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6216 	txq->txq_snext = 0;
   6217 	txq->txq_sdirty = 0;
   6218 }
   6219 
   6220 static void
   6221 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6222     struct wm_txqueue *txq)
   6223 {
   6224 
   6225 	KASSERT(mutex_owned(txq->txq_lock));
   6226 
   6227 	/*
   6228 	 * Set up some register offsets that are different between
   6229 	 * the i82542 and the i82543 and later chips.
   6230 	 */
   6231 	if (sc->sc_type < WM_T_82543)
   6232 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6233 	else
   6234 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6235 
   6236 	wm_init_tx_descs(sc, txq);
   6237 	wm_init_tx_regs(sc, wmq, txq);
   6238 	wm_init_tx_buffer(sc, txq);
   6239 }
   6240 
   6241 static void
   6242 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6243     struct wm_rxqueue *rxq)
   6244 {
   6245 
   6246 	KASSERT(mutex_owned(rxq->rxq_lock));
   6247 
   6248 	/*
   6249 	 * Initialize the receive descriptor and receive job
   6250 	 * descriptor rings.
   6251 	 */
   6252 	if (sc->sc_type < WM_T_82543) {
   6253 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6254 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6255 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6256 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6257 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6258 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6259 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6260 
   6261 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6262 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6263 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6264 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6265 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6266 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6267 	} else {
   6268 		int qid = wmq->wmq_id;
   6269 
   6270 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6271 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6272 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6273 
   6274 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6275 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6276 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
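			/*
			 * The check above assumes SRRCTL.BSIZEPKT is
			 * expressed in units of 1 << SRRCTL_BSIZEPKT_SHIFT
			 * bytes, so MCLBYTES must be a multiple of that
			 * unit for the shift below to be exact.
			 */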
   6277 
    6278 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6279 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6280 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6281 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6282 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6283 			    | RXDCTL_WTHRESH(1));
   6284 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6285 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6286 		} else {
   6287 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6288 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6289 			/* XXX should update with AIM? */
   6290 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6291 			/* MUST be same */
   6292 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6293 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6294 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6295 		}
   6296 	}
   6297 }
   6298 
   6299 static int
   6300 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6301 {
   6302 	struct wm_rxsoft *rxs;
   6303 	int error, i;
   6304 
   6305 	KASSERT(mutex_owned(rxq->rxq_lock));
   6306 
   6307 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6308 		rxs = &rxq->rxq_soft[i];
   6309 		if (rxs->rxs_mbuf == NULL) {
   6310 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6311 				log(LOG_ERR, "%s: unable to allocate or map "
   6312 				    "rx buffer %d, error = %d\n",
   6313 				    device_xname(sc->sc_dev), i, error);
   6314 				/*
   6315 				 * XXX Should attempt to run with fewer receive
   6316 				 * XXX buffers instead of just failing.
   6317 				 */
   6318 				wm_rxdrain(rxq);
   6319 				return ENOMEM;
   6320 			}
   6321 		} else {
   6322 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6323 				wm_init_rxdesc(rxq, i);
    6324 			/*
    6325 			 * For 82575 and newer devices, the RX descriptors
    6326 			 * must be initialized after RCTL.EN is set in
    6327 			 * wm_set_filter().
    6328 			 */
   6329 		}
   6330 	}
   6331 	rxq->rxq_ptr = 0;
   6332 	rxq->rxq_discard = 0;
   6333 	WM_RXCHAIN_RESET(rxq);
   6334 
   6335 	return 0;
   6336 }
   6337 
   6338 static int
   6339 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6340     struct wm_rxqueue *rxq)
   6341 {
   6342 
   6343 	KASSERT(mutex_owned(rxq->rxq_lock));
   6344 
   6345 	/*
   6346 	 * Set up some register offsets that are different between
   6347 	 * the i82542 and the i82543 and later chips.
   6348 	 */
   6349 	if (sc->sc_type < WM_T_82543)
   6350 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6351 	else
   6352 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6353 
   6354 	wm_init_rx_regs(sc, wmq, rxq);
   6355 	return wm_init_rx_buffer(sc, rxq);
   6356 }
   6357 
   6358 /*
    6359  * wm_init_txrx_queues:
   6360  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6361  */
   6362 static int
   6363 wm_init_txrx_queues(struct wm_softc *sc)
   6364 {
   6365 	int i, error = 0;
   6366 
   6367 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6368 		device_xname(sc->sc_dev), __func__));
   6369 
   6370 	for (i = 0; i < sc->sc_nqueues; i++) {
   6371 		struct wm_queue *wmq = &sc->sc_queue[i];
   6372 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6373 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6374 
    6375 		/*
    6376 		 * TODO
    6377 		 * Currently, a constant value is used instead of AIM
    6378 		 * (adaptive interrupt moderation). Furthermore, the
    6379 		 * multiqueue (polling mode) case uses a lower interrupt
    6380 		 * interval than the default value.
    6381 		 * More tuning and AIM support are required.
    6382 		 */
   6382 		if (sc->sc_nqueues > 1)
   6383 			wmq->wmq_itr = 50;
   6384 		else
   6385 			wmq->wmq_itr = sc->sc_itr_init;
   6386 		wmq->wmq_set_itr = true;
   6387 
   6388 		mutex_enter(txq->txq_lock);
   6389 		wm_init_tx_queue(sc, wmq, txq);
   6390 		mutex_exit(txq->txq_lock);
   6391 
   6392 		mutex_enter(rxq->rxq_lock);
   6393 		error = wm_init_rx_queue(sc, wmq, rxq);
   6394 		mutex_exit(rxq->rxq_lock);
   6395 		if (error)
   6396 			break;
   6397 	}
   6398 
   6399 	return error;
   6400 }
   6401 
   6402 /*
   6403  * wm_tx_offload:
   6404  *
   6405  *	Set up TCP/IP checksumming parameters for the
   6406  *	specified packet.
   6407  */
   6408 static int
   6409 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6410     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6411 {
   6412 	struct mbuf *m0 = txs->txs_mbuf;
   6413 	struct livengood_tcpip_ctxdesc *t;
   6414 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6415 	uint32_t ipcse;
   6416 	struct ether_header *eh;
   6417 	int offset, iphl;
   6418 	uint8_t fields;
   6419 
   6420 	/*
   6421 	 * XXX It would be nice if the mbuf pkthdr had offset
   6422 	 * fields for the protocol headers.
   6423 	 */
   6424 
   6425 	eh = mtod(m0, struct ether_header *);
   6426 	switch (htons(eh->ether_type)) {
   6427 	case ETHERTYPE_IP:
   6428 	case ETHERTYPE_IPV6:
   6429 		offset = ETHER_HDR_LEN;
   6430 		break;
   6431 
   6432 	case ETHERTYPE_VLAN:
   6433 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6434 		break;
   6435 
   6436 	default:
   6437 		/*
   6438 		 * Don't support this protocol or encapsulation.
   6439 		 */
   6440 		*fieldsp = 0;
   6441 		*cmdp = 0;
   6442 		return 0;
   6443 	}
   6444 
   6445 	if ((m0->m_pkthdr.csum_flags &
   6446 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6447 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6448 	} else {
   6449 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6450 	}
   6451 	ipcse = offset + iphl - 1;
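	/*
	 * IPCSE holds the inclusive offset of the last byte covered by
	 * the IP checksum, hence the "- 1" above.
	 */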
   6452 
   6453 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6454 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6455 	seg = 0;
   6456 	fields = 0;
   6457 
   6458 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6459 		int hlen = offset + iphl;
   6460 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6461 
   6462 		if (__predict_false(m0->m_len <
   6463 				    (hlen + sizeof(struct tcphdr)))) {
   6464 			/*
   6465 			 * TCP/IP headers are not in the first mbuf; we need
   6466 			 * to do this the slow and painful way.  Let's just
   6467 			 * hope this doesn't happen very often.
   6468 			 */
   6469 			struct tcphdr th;
   6470 
   6471 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6472 
   6473 			m_copydata(m0, hlen, sizeof(th), &th);
   6474 			if (v4) {
   6475 				struct ip ip;
   6476 
   6477 				m_copydata(m0, offset, sizeof(ip), &ip);
   6478 				ip.ip_len = 0;
   6479 				m_copyback(m0,
   6480 				    offset + offsetof(struct ip, ip_len),
   6481 				    sizeof(ip.ip_len), &ip.ip_len);
   6482 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6483 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6484 			} else {
   6485 				struct ip6_hdr ip6;
   6486 
   6487 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6488 				ip6.ip6_plen = 0;
   6489 				m_copyback(m0,
   6490 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6491 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6492 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6493 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6494 			}
   6495 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6496 			    sizeof(th.th_sum), &th.th_sum);
   6497 
   6498 			hlen += th.th_off << 2;
   6499 		} else {
   6500 			/*
   6501 			 * TCP/IP headers are in the first mbuf; we can do
   6502 			 * this the easy way.
   6503 			 */
   6504 			struct tcphdr *th;
   6505 
   6506 			if (v4) {
   6507 				struct ip *ip =
   6508 				    (void *)(mtod(m0, char *) + offset);
   6509 				th = (void *)(mtod(m0, char *) + hlen);
   6510 
   6511 				ip->ip_len = 0;
   6512 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6513 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6514 			} else {
   6515 				struct ip6_hdr *ip6 =
   6516 				    (void *)(mtod(m0, char *) + offset);
   6517 				th = (void *)(mtod(m0, char *) + hlen);
   6518 
   6519 				ip6->ip6_plen = 0;
   6520 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6521 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6522 			}
   6523 			hlen += th->th_off << 2;
   6524 		}
   6525 
   6526 		if (v4) {
   6527 			WM_Q_EVCNT_INCR(txq, txtso);
   6528 			cmdlen |= WTX_TCPIP_CMD_IP;
   6529 		} else {
   6530 			WM_Q_EVCNT_INCR(txq, txtso6);
   6531 			ipcse = 0;
   6532 		}
   6533 		cmd |= WTX_TCPIP_CMD_TSE;
   6534 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6535 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6536 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6537 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6538 	}
   6539 
   6540 	/*
   6541 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6542 	 * offload feature, if we load the context descriptor, we
   6543 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6544 	 */
   6545 
   6546 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6547 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6548 	    WTX_TCPIP_IPCSE(ipcse);
   6549 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6550 		WM_Q_EVCNT_INCR(txq, txipsum);
   6551 		fields |= WTX_IXSM;
   6552 	}
   6553 
   6554 	offset += iphl;
   6555 
   6556 	if (m0->m_pkthdr.csum_flags &
   6557 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6558 		WM_Q_EVCNT_INCR(txq, txtusum);
   6559 		fields |= WTX_TXSM;
   6560 		tucs = WTX_TCPIP_TUCSS(offset) |
   6561 		    WTX_TCPIP_TUCSO(offset +
   6562 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6563 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6564 	} else if ((m0->m_pkthdr.csum_flags &
   6565 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6566 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6567 		fields |= WTX_TXSM;
   6568 		tucs = WTX_TCPIP_TUCSS(offset) |
   6569 		    WTX_TCPIP_TUCSO(offset +
   6570 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6571 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6572 	} else {
   6573 		/* Just initialize it to a valid TCP context. */
   6574 		tucs = WTX_TCPIP_TUCSS(offset) |
   6575 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6576 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6577 	}
   6578 
    6579 	/*
    6580 	 * We don't have to write a context descriptor for every packet,
    6581 	 * except on the 82574: there, a context descriptor must be written
    6582 	 * for every packet when two descriptor queues are used.
    6583 	 * Writing a context descriptor for every packet adds overhead,
    6584 	 * but it does not cause problems.
    6585 	 */
   6586 	/* Fill in the context descriptor. */
   6587 	t = (struct livengood_tcpip_ctxdesc *)
   6588 	    &txq->txq_descs[txq->txq_next];
   6589 	t->tcpip_ipcs = htole32(ipcs);
   6590 	t->tcpip_tucs = htole32(tucs);
   6591 	t->tcpip_cmdlen = htole32(cmdlen);
   6592 	t->tcpip_seg = htole32(seg);
   6593 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6594 
   6595 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6596 	txs->txs_ndesc++;
   6597 
   6598 	*cmdp = cmd;
   6599 	*fieldsp = fields;
   6600 
   6601 	return 0;
   6602 }
   6603 
   6604 static inline int
   6605 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6606 {
   6607 	struct wm_softc *sc = ifp->if_softc;
   6608 	u_int cpuid = cpu_index(curcpu());
   6609 
    6610 	/*
    6611 	 * Currently, a simple distribution strategy based on the
    6612 	 * sending CPU's index; an illustrative example follows.
    6613 	 * TODO: distribute by flowid (RSS hash value).
    6614 	 */
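	/*
	 * Illustrative example (values are hypothetical): with ncpu = 8,
	 * sc_nqueues = 4 and sc_affinity_offset = 2, a packet sent from
	 * CPU 2 maps to queue 0, CPU 3 to queue 1, and so on, wrapping
	 * modulo sc_nqueues.
	 */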
    6615 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6616 }
   6617 
   6618 /*
   6619  * wm_start:		[ifnet interface function]
   6620  *
   6621  *	Start packet transmission on the interface.
   6622  */
   6623 static void
   6624 wm_start(struct ifnet *ifp)
   6625 {
   6626 	struct wm_softc *sc = ifp->if_softc;
   6627 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6628 
   6629 #ifdef WM_MPSAFE
   6630 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6631 #endif
   6632 	/*
   6633 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6634 	 */
   6635 
   6636 	mutex_enter(txq->txq_lock);
   6637 	if (!txq->txq_stopping)
   6638 		wm_start_locked(ifp);
   6639 	mutex_exit(txq->txq_lock);
   6640 }
   6641 
   6642 static void
   6643 wm_start_locked(struct ifnet *ifp)
   6644 {
   6645 	struct wm_softc *sc = ifp->if_softc;
   6646 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6647 
   6648 	wm_send_common_locked(ifp, txq, false);
   6649 }
   6650 
   6651 static int
   6652 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6653 {
   6654 	int qid;
   6655 	struct wm_softc *sc = ifp->if_softc;
   6656 	struct wm_txqueue *txq;
   6657 
   6658 	qid = wm_select_txqueue(ifp, m);
   6659 	txq = &sc->sc_queue[qid].wmq_txq;
   6660 
   6661 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6662 		m_freem(m);
   6663 		WM_Q_EVCNT_INCR(txq, txdrop);
   6664 		return ENOBUFS;
   6665 	}
   6666 
   6667 	/*
   6668 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6669 	 */
   6670 	ifp->if_obytes += m->m_pkthdr.len;
   6671 	if (m->m_flags & M_MCAST)
   6672 		ifp->if_omcasts++;
   6673 
   6674 	if (mutex_tryenter(txq->txq_lock)) {
   6675 		if (!txq->txq_stopping)
   6676 			wm_transmit_locked(ifp, txq);
   6677 		mutex_exit(txq->txq_lock);
   6678 	}
   6679 
   6680 	return 0;
   6681 }
   6682 
   6683 static void
   6684 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6685 {
   6686 
   6687 	wm_send_common_locked(ifp, txq, true);
   6688 }
   6689 
   6690 static void
   6691 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6692     bool is_transmit)
   6693 {
   6694 	struct wm_softc *sc = ifp->if_softc;
   6695 	struct mbuf *m0;
   6696 	struct m_tag *mtag;
   6697 	struct wm_txsoft *txs;
   6698 	bus_dmamap_t dmamap;
   6699 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6700 	bus_addr_t curaddr;
   6701 	bus_size_t seglen, curlen;
   6702 	uint32_t cksumcmd;
   6703 	uint8_t cksumfields;
   6704 
   6705 	KASSERT(mutex_owned(txq->txq_lock));
   6706 
   6707 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6708 		return;
   6709 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6710 		return;
   6711 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6712 		return;
   6713 
   6714 	/* Remember the previous number of free descriptors. */
   6715 	ofree = txq->txq_free;
   6716 
   6717 	/*
   6718 	 * Loop through the send queue, setting up transmit descriptors
   6719 	 * until we drain the queue, or use up all available transmit
   6720 	 * descriptors.
   6721 	 */
   6722 	for (;;) {
   6723 		m0 = NULL;
   6724 
   6725 		/* Get a work queue entry. */
   6726 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6727 			wm_txeof(sc, txq);
   6728 			if (txq->txq_sfree == 0) {
   6729 				DPRINTF(WM_DEBUG_TX,
   6730 				    ("%s: TX: no free job descriptors\n",
   6731 					device_xname(sc->sc_dev)));
   6732 				WM_Q_EVCNT_INCR(txq, txsstall);
   6733 				break;
   6734 			}
   6735 		}
   6736 
   6737 		/* Grab a packet off the queue. */
   6738 		if (is_transmit)
   6739 			m0 = pcq_get(txq->txq_interq);
   6740 		else
   6741 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6742 		if (m0 == NULL)
   6743 			break;
   6744 
   6745 		DPRINTF(WM_DEBUG_TX,
   6746 		    ("%s: TX: have packet to transmit: %p\n",
   6747 		    device_xname(sc->sc_dev), m0));
   6748 
   6749 		txs = &txq->txq_soft[txq->txq_snext];
   6750 		dmamap = txs->txs_dmamap;
   6751 
   6752 		use_tso = (m0->m_pkthdr.csum_flags &
   6753 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6754 
   6755 		/*
   6756 		 * So says the Linux driver:
   6757 		 * The controller does a simple calculation to make sure
   6758 		 * there is enough room in the FIFO before initiating the
   6759 		 * DMA for each buffer.  The calc is:
   6760 		 *	4 = ceil(buffer len / MSS)
   6761 		 * To make sure we don't overrun the FIFO, adjust the max
   6762 		 * buffer len if the MSS drops.
   6763 		 */
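		/*
		 * In other words, segsz << 2 == 4 * MSS, so a single DMA
		 * segment spans at most four MSS-sized chunks (our reading
		 * of the workaround above).
		 */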
   6764 		dmamap->dm_maxsegsz =
   6765 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6766 		    ? m0->m_pkthdr.segsz << 2
   6767 		    : WTX_MAX_LEN;
   6768 
   6769 		/*
   6770 		 * Load the DMA map.  If this fails, the packet either
   6771 		 * didn't fit in the allotted number of segments, or we
   6772 		 * were short on resources.  For the too-many-segments
   6773 		 * case, we simply report an error and drop the packet,
   6774 		 * since we can't sanely copy a jumbo packet to a single
   6775 		 * buffer.
   6776 		 */
   6777 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6778 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6779 		if (error) {
   6780 			if (error == EFBIG) {
   6781 				WM_Q_EVCNT_INCR(txq, txdrop);
   6782 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6783 				    "DMA segments, dropping...\n",
   6784 				    device_xname(sc->sc_dev));
   6785 				wm_dump_mbuf_chain(sc, m0);
   6786 				m_freem(m0);
   6787 				continue;
   6788 			}
   6789 			/*  Short on resources, just stop for now. */
   6790 			DPRINTF(WM_DEBUG_TX,
   6791 			    ("%s: TX: dmamap load failed: %d\n",
   6792 			    device_xname(sc->sc_dev), error));
   6793 			break;
   6794 		}
   6795 
   6796 		segs_needed = dmamap->dm_nsegs;
   6797 		if (use_tso) {
   6798 			/* For sentinel descriptor; see below. */
   6799 			segs_needed++;
   6800 		}
   6801 
   6802 		/*
   6803 		 * Ensure we have enough descriptors free to describe
   6804 		 * the packet.  Note, we always reserve one descriptor
   6805 		 * at the end of the ring due to the semantics of the
   6806 		 * TDT register, plus one more in the event we need
   6807 		 * to load offload context.
   6808 		 */
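		/*
		 * Worked example: with txq_free == 6, a packet needing
		 * segs_needed == 5 must wait, since 5 > 6 - 2.
		 */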
   6809 		if (segs_needed > txq->txq_free - 2) {
   6810 			/*
   6811 			 * Not enough free descriptors to transmit this
   6812 			 * packet.  We haven't committed anything yet,
   6813 			 * so just unload the DMA map, put the packet
    6814 			 * back on the queue, and punt.  Notify the upper
   6815 			 * layer that there are no more slots left.
   6816 			 */
   6817 			DPRINTF(WM_DEBUG_TX,
   6818 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6819 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6820 			    segs_needed, txq->txq_free - 1));
   6821 			if (!is_transmit)
   6822 				ifp->if_flags |= IFF_OACTIVE;
   6823 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6824 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6825 			WM_Q_EVCNT_INCR(txq, txdstall);
   6826 			break;
   6827 		}
   6828 
   6829 		/*
   6830 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6831 		 * once we know we can transmit the packet, since we
   6832 		 * do some internal FIFO space accounting here.
   6833 		 */
   6834 		if (sc->sc_type == WM_T_82547 &&
   6835 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6836 			DPRINTF(WM_DEBUG_TX,
   6837 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6838 			    device_xname(sc->sc_dev)));
   6839 			if (!is_transmit)
   6840 				ifp->if_flags |= IFF_OACTIVE;
   6841 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6842 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6843 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6844 			break;
   6845 		}
   6846 
   6847 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6848 
   6849 		DPRINTF(WM_DEBUG_TX,
   6850 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6851 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6852 
   6853 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6854 
   6855 		/*
   6856 		 * Store a pointer to the packet so that we can free it
   6857 		 * later.
   6858 		 *
   6859 		 * Initially, we consider the number of descriptors the
   6860 		 * packet uses the number of DMA segments.  This may be
   6861 		 * incremented by 1 if we do checksum offload (a descriptor
   6862 		 * is used to set the checksum context).
   6863 		 */
   6864 		txs->txs_mbuf = m0;
   6865 		txs->txs_firstdesc = txq->txq_next;
   6866 		txs->txs_ndesc = segs_needed;
   6867 
   6868 		/* Set up offload parameters for this packet. */
   6869 		if (m0->m_pkthdr.csum_flags &
   6870 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6871 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6872 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6873 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   6874 					  &cksumfields) != 0) {
   6875 				/* Error message already displayed. */
   6876 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6877 				continue;
   6878 			}
   6879 		} else {
   6880 			cksumcmd = 0;
   6881 			cksumfields = 0;
   6882 		}
   6883 
   6884 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6885 
   6886 		/* Sync the DMA map. */
   6887 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6888 		    BUS_DMASYNC_PREWRITE);
   6889 
   6890 		/* Initialize the transmit descriptor. */
   6891 		for (nexttx = txq->txq_next, seg = 0;
   6892 		     seg < dmamap->dm_nsegs; seg++) {
   6893 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6894 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6895 			     seglen != 0;
   6896 			     curaddr += curlen, seglen -= curlen,
   6897 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6898 				curlen = seglen;
   6899 
   6900 				/*
   6901 				 * So says the Linux driver:
   6902 				 * Work around for premature descriptor
   6903 				 * write-backs in TSO mode.  Append a
   6904 				 * 4-byte sentinel descriptor.
   6905 				 */
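				/*
				 * Trimming 4 bytes off the final chunk makes
				 * this loop emit one extra 4-byte descriptor,
				 * which acts as the sentinel; that is why
				 * segs_needed was incremented for TSO above.
				 */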
   6906 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6907 				    curlen > 8)
   6908 					curlen -= 4;
   6909 
   6910 				wm_set_dma_addr(
   6911 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6912 				txq->txq_descs[nexttx].wtx_cmdlen
   6913 				    = htole32(cksumcmd | curlen);
   6914 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6915 				    = 0;
   6916 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6917 				    = cksumfields;
    6918 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6919 				lasttx = nexttx;
   6920 
   6921 				DPRINTF(WM_DEBUG_TX,
   6922 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6923 				     "len %#04zx\n",
   6924 				    device_xname(sc->sc_dev), nexttx,
   6925 				    (uint64_t)curaddr, curlen));
   6926 			}
   6927 		}
   6928 
   6929 		KASSERT(lasttx != -1);
   6930 
   6931 		/*
   6932 		 * Set up the command byte on the last descriptor of
   6933 		 * the packet.  If we're in the interrupt delay window,
   6934 		 * delay the interrupt.
   6935 		 */
   6936 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6937 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6938 
   6939 		/*
   6940 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6941 		 * up the descriptor to encapsulate the packet for us.
   6942 		 *
   6943 		 * This is only valid on the last descriptor of the packet.
   6944 		 */
   6945 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6946 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6947 			    htole32(WTX_CMD_VLE);
   6948 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6949 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6950 		}
   6951 
   6952 		txs->txs_lastdesc = lasttx;
   6953 
   6954 		DPRINTF(WM_DEBUG_TX,
   6955 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6956 		    device_xname(sc->sc_dev),
   6957 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6958 
   6959 		/* Sync the descriptors we're using. */
   6960 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6961 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6962 
   6963 		/* Give the packet to the chip. */
   6964 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6965 
   6966 		DPRINTF(WM_DEBUG_TX,
   6967 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6968 
   6969 		DPRINTF(WM_DEBUG_TX,
   6970 		    ("%s: TX: finished transmitting packet, job %d\n",
   6971 		    device_xname(sc->sc_dev), txq->txq_snext));
   6972 
   6973 		/* Advance the tx pointer. */
   6974 		txq->txq_free -= txs->txs_ndesc;
   6975 		txq->txq_next = nexttx;
   6976 
   6977 		txq->txq_sfree--;
   6978 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6979 
   6980 		/* Pass the packet to any BPF listeners. */
   6981 		bpf_mtap(ifp, m0);
   6982 	}
   6983 
   6984 	if (m0 != NULL) {
   6985 		if (!is_transmit)
   6986 			ifp->if_flags |= IFF_OACTIVE;
   6987 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6988 		WM_Q_EVCNT_INCR(txq, txdrop);
   6989 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6990 			__func__));
   6991 		m_freem(m0);
   6992 	}
   6993 
   6994 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6995 		/* No more slots; notify upper layer. */
   6996 		if (!is_transmit)
   6997 			ifp->if_flags |= IFF_OACTIVE;
   6998 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6999 	}
   7000 
   7001 	if (txq->txq_free != ofree) {
   7002 		/* Set a watchdog timer in case the chip flakes out. */
   7003 		ifp->if_timer = 5;
   7004 	}
   7005 }
   7006 
   7007 /*
   7008  * wm_nq_tx_offload:
   7009  *
   7010  *	Set up TCP/IP checksumming parameters for the
   7011  *	specified packet, for NEWQUEUE devices
   7012  */
   7013 static int
   7014 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7015     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7016 {
   7017 	struct mbuf *m0 = txs->txs_mbuf;
   7018 	struct m_tag *mtag;
   7019 	uint32_t vl_len, mssidx, cmdc;
   7020 	struct ether_header *eh;
   7021 	int offset, iphl;
   7022 
   7023 	/*
   7024 	 * XXX It would be nice if the mbuf pkthdr had offset
   7025 	 * fields for the protocol headers.
   7026 	 */
   7027 	*cmdlenp = 0;
   7028 	*fieldsp = 0;
   7029 
   7030 	eh = mtod(m0, struct ether_header *);
   7031 	switch (htons(eh->ether_type)) {
   7032 	case ETHERTYPE_IP:
   7033 	case ETHERTYPE_IPV6:
   7034 		offset = ETHER_HDR_LEN;
   7035 		break;
   7036 
   7037 	case ETHERTYPE_VLAN:
   7038 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7039 		break;
   7040 
   7041 	default:
   7042 		/* Don't support this protocol or encapsulation. */
   7043 		*do_csum = false;
   7044 		return 0;
   7045 	}
   7046 	*do_csum = true;
   7047 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7048 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7049 
   7050 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7051 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7052 
   7053 	if ((m0->m_pkthdr.csum_flags &
   7054 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7055 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7056 	} else {
   7057 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7058 	}
   7059 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7060 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7061 
   7062 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7063 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7064 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7065 		*cmdlenp |= NQTX_CMD_VLE;
   7066 	}
   7067 
   7068 	mssidx = 0;
   7069 
   7070 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7071 		int hlen = offset + iphl;
   7072 		int tcp_hlen;
   7073 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7074 
   7075 		if (__predict_false(m0->m_len <
   7076 				    (hlen + sizeof(struct tcphdr)))) {
   7077 			/*
   7078 			 * TCP/IP headers are not in the first mbuf; we need
   7079 			 * to do this the slow and painful way.  Let's just
   7080 			 * hope this doesn't happen very often.
   7081 			 */
   7082 			struct tcphdr th;
   7083 
   7084 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7085 
   7086 			m_copydata(m0, hlen, sizeof(th), &th);
   7087 			if (v4) {
   7088 				struct ip ip;
   7089 
   7090 				m_copydata(m0, offset, sizeof(ip), &ip);
   7091 				ip.ip_len = 0;
   7092 				m_copyback(m0,
   7093 				    offset + offsetof(struct ip, ip_len),
   7094 				    sizeof(ip.ip_len), &ip.ip_len);
   7095 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7096 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7097 			} else {
   7098 				struct ip6_hdr ip6;
   7099 
   7100 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7101 				ip6.ip6_plen = 0;
   7102 				m_copyback(m0,
   7103 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7104 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7105 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7106 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7107 			}
   7108 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7109 			    sizeof(th.th_sum), &th.th_sum);
   7110 
   7111 			tcp_hlen = th.th_off << 2;
   7112 		} else {
   7113 			/*
   7114 			 * TCP/IP headers are in the first mbuf; we can do
   7115 			 * this the easy way.
   7116 			 */
   7117 			struct tcphdr *th;
   7118 
   7119 			if (v4) {
   7120 				struct ip *ip =
   7121 				    (void *)(mtod(m0, char *) + offset);
   7122 				th = (void *)(mtod(m0, char *) + hlen);
   7123 
   7124 				ip->ip_len = 0;
   7125 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7126 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7127 			} else {
   7128 				struct ip6_hdr *ip6 =
   7129 				    (void *)(mtod(m0, char *) + offset);
   7130 				th = (void *)(mtod(m0, char *) + hlen);
   7131 
   7132 				ip6->ip6_plen = 0;
   7133 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7134 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7135 			}
   7136 			tcp_hlen = th->th_off << 2;
   7137 		}
   7138 		hlen += tcp_hlen;
   7139 		*cmdlenp |= NQTX_CMD_TSE;
   7140 
   7141 		if (v4) {
   7142 			WM_Q_EVCNT_INCR(txq, txtso);
   7143 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7144 		} else {
   7145 			WM_Q_EVCNT_INCR(txq, txtso6);
   7146 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7147 		}
   7148 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7149 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7150 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7151 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7152 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7153 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7154 	} else {
   7155 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7156 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7157 	}
   7158 
   7159 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7160 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7161 		cmdc |= NQTXC_CMD_IP4;
   7162 	}
   7163 
   7164 	if (m0->m_pkthdr.csum_flags &
   7165 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7166 		WM_Q_EVCNT_INCR(txq, txtusum);
   7167 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7168 			cmdc |= NQTXC_CMD_TCP;
   7169 		} else {
   7170 			cmdc |= NQTXC_CMD_UDP;
   7171 		}
   7172 		cmdc |= NQTXC_CMD_IP4;
   7173 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7174 	}
   7175 	if (m0->m_pkthdr.csum_flags &
   7176 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7177 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7178 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7179 			cmdc |= NQTXC_CMD_TCP;
   7180 		} else {
   7181 			cmdc |= NQTXC_CMD_UDP;
   7182 		}
   7183 		cmdc |= NQTXC_CMD_IP6;
   7184 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7185 	}
   7186 
    7187 	/*
    7188 	 * We don't have to write a context descriptor for every packet on
    7189 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7190 	 * I210 and I211. Writing one once per Tx queue is enough for these
    7191 	 * controllers.
    7192 	 * Writing a context descriptor for every packet adds overhead,
    7193 	 * but it does not cause problems.
    7194 	 */
   7195 	/* Fill in the context descriptor. */
   7196 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7197 	    htole32(vl_len);
   7198 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7199 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7200 	    htole32(cmdc);
   7201 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7202 	    htole32(mssidx);
   7203 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7204 	DPRINTF(WM_DEBUG_TX,
   7205 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7206 	    txq->txq_next, 0, vl_len));
   7207 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7208 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7209 	txs->txs_ndesc++;
   7210 	return 0;
   7211 }
   7212 
   7213 /*
   7214  * wm_nq_start:		[ifnet interface function]
   7215  *
   7216  *	Start packet transmission on the interface for NEWQUEUE devices
   7217  */
   7218 static void
   7219 wm_nq_start(struct ifnet *ifp)
   7220 {
   7221 	struct wm_softc *sc = ifp->if_softc;
   7222 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7223 
   7224 #ifdef WM_MPSAFE
   7225 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7226 #endif
   7227 	/*
   7228 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7229 	 */
   7230 
   7231 	mutex_enter(txq->txq_lock);
   7232 	if (!txq->txq_stopping)
   7233 		wm_nq_start_locked(ifp);
   7234 	mutex_exit(txq->txq_lock);
   7235 }
   7236 
   7237 static void
   7238 wm_nq_start_locked(struct ifnet *ifp)
   7239 {
   7240 	struct wm_softc *sc = ifp->if_softc;
   7241 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7242 
   7243 	wm_nq_send_common_locked(ifp, txq, false);
   7244 }
   7245 
   7246 static int
   7247 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7248 {
   7249 	int qid;
   7250 	struct wm_softc *sc = ifp->if_softc;
   7251 	struct wm_txqueue *txq;
   7252 
   7253 	qid = wm_select_txqueue(ifp, m);
   7254 	txq = &sc->sc_queue[qid].wmq_txq;
   7255 
   7256 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7257 		m_freem(m);
   7258 		WM_Q_EVCNT_INCR(txq, txdrop);
   7259 		return ENOBUFS;
   7260 	}
   7261 
   7262 	/*
   7263 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7264 	 */
   7265 	ifp->if_obytes += m->m_pkthdr.len;
   7266 	if (m->m_flags & M_MCAST)
   7267 		ifp->if_omcasts++;
   7268 
    7269 	/*
    7270 	 * There are two situations in which this mutex_tryenter() can
    7271 	 * fail at run time:
    7272 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7273 	 *     (2) contention with the deferred if_start softint
    7274 	 *         (wm_handle_queue())
    7275 	 * In either case, the last packet enqueued to txq->txq_interq is
    7276 	 * eventually dequeued by wm_deferred_start_locked(), so it does
    7277 	 * not get stuck.
    7278 	 */
   7279 	if (mutex_tryenter(txq->txq_lock)) {
   7280 		if (!txq->txq_stopping)
   7281 			wm_nq_transmit_locked(ifp, txq);
   7282 		mutex_exit(txq->txq_lock);
   7283 	}
   7284 
   7285 	return 0;
   7286 }
   7287 
   7288 static void
   7289 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7290 {
   7291 
   7292 	wm_nq_send_common_locked(ifp, txq, true);
   7293 }
   7294 
   7295 static void
   7296 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7297     bool is_transmit)
   7298 {
   7299 	struct wm_softc *sc = ifp->if_softc;
   7300 	struct mbuf *m0;
   7301 	struct m_tag *mtag;
   7302 	struct wm_txsoft *txs;
   7303 	bus_dmamap_t dmamap;
   7304 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7305 	bool do_csum, sent;
   7306 
   7307 	KASSERT(mutex_owned(txq->txq_lock));
   7308 
   7309 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7310 		return;
   7311 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7312 		return;
   7313 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7314 		return;
   7315 
   7316 	sent = false;
   7317 
   7318 	/*
   7319 	 * Loop through the send queue, setting up transmit descriptors
   7320 	 * until we drain the queue, or use up all available transmit
   7321 	 * descriptors.
   7322 	 */
   7323 	for (;;) {
   7324 		m0 = NULL;
   7325 
   7326 		/* Get a work queue entry. */
   7327 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7328 			wm_txeof(sc, txq);
   7329 			if (txq->txq_sfree == 0) {
   7330 				DPRINTF(WM_DEBUG_TX,
   7331 				    ("%s: TX: no free job descriptors\n",
   7332 					device_xname(sc->sc_dev)));
   7333 				WM_Q_EVCNT_INCR(txq, txsstall);
   7334 				break;
   7335 			}
   7336 		}
   7337 
   7338 		/* Grab a packet off the queue. */
   7339 		if (is_transmit)
   7340 			m0 = pcq_get(txq->txq_interq);
   7341 		else
   7342 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7343 		if (m0 == NULL)
   7344 			break;
   7345 
   7346 		DPRINTF(WM_DEBUG_TX,
   7347 		    ("%s: TX: have packet to transmit: %p\n",
   7348 		    device_xname(sc->sc_dev), m0));
   7349 
   7350 		txs = &txq->txq_soft[txq->txq_snext];
   7351 		dmamap = txs->txs_dmamap;
   7352 
   7353 		/*
   7354 		 * Load the DMA map.  If this fails, the packet either
   7355 		 * didn't fit in the allotted number of segments, or we
   7356 		 * were short on resources.  For the too-many-segments
   7357 		 * case, we simply report an error and drop the packet,
   7358 		 * since we can't sanely copy a jumbo packet to a single
   7359 		 * buffer.
   7360 		 */
   7361 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7362 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7363 		if (error) {
   7364 			if (error == EFBIG) {
   7365 				WM_Q_EVCNT_INCR(txq, txdrop);
   7366 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7367 				    "DMA segments, dropping...\n",
   7368 				    device_xname(sc->sc_dev));
   7369 				wm_dump_mbuf_chain(sc, m0);
   7370 				m_freem(m0);
   7371 				continue;
   7372 			}
   7373 			/* Short on resources, just stop for now. */
   7374 			DPRINTF(WM_DEBUG_TX,
   7375 			    ("%s: TX: dmamap load failed: %d\n",
   7376 			    device_xname(sc->sc_dev), error));
   7377 			break;
   7378 		}
   7379 
   7380 		segs_needed = dmamap->dm_nsegs;
   7381 
   7382 		/*
   7383 		 * Ensure we have enough descriptors free to describe
   7384 		 * the packet.  Note, we always reserve one descriptor
   7385 		 * at the end of the ring due to the semantics of the
   7386 		 * TDT register, plus one more in the event we need
   7387 		 * to load offload context.
   7388 		 */
   7389 		if (segs_needed > txq->txq_free - 2) {
   7390 			/*
   7391 			 * Not enough free descriptors to transmit this
   7392 			 * packet.  We haven't committed anything yet,
   7393 			 * so just unload the DMA map, put the packet
    7394 			 * back on the queue, and punt.  Notify the upper
   7395 			 * layer that there are no more slots left.
   7396 			 */
   7397 			DPRINTF(WM_DEBUG_TX,
   7398 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7399 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7400 			    segs_needed, txq->txq_free - 1));
   7401 			if (!is_transmit)
   7402 				ifp->if_flags |= IFF_OACTIVE;
   7403 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7404 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7405 			WM_Q_EVCNT_INCR(txq, txdstall);
   7406 			break;
   7407 		}
   7408 
   7409 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7410 
   7411 		DPRINTF(WM_DEBUG_TX,
   7412 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7413 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7414 
   7415 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7416 
   7417 		/*
   7418 		 * Store a pointer to the packet so that we can free it
   7419 		 * later.
   7420 		 *
   7421 		 * Initially, we consider the number of descriptors the
   7422 		 * packet uses the number of DMA segments.  This may be
   7423 		 * incremented by 1 if we do checksum offload (a descriptor
   7424 		 * is used to set the checksum context).
   7425 		 */
   7426 		txs->txs_mbuf = m0;
   7427 		txs->txs_firstdesc = txq->txq_next;
   7428 		txs->txs_ndesc = segs_needed;
   7429 
   7430 		/* Set up offload parameters for this packet. */
   7431 		uint32_t cmdlen, fields, dcmdlen;
   7432 		if (m0->m_pkthdr.csum_flags &
   7433 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7434 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7435 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7436 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7437 			    &do_csum) != 0) {
   7438 				/* Error message already displayed. */
   7439 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7440 				continue;
   7441 			}
   7442 		} else {
   7443 			do_csum = false;
   7444 			cmdlen = 0;
   7445 			fields = 0;
   7446 		}
   7447 
   7448 		/* Sync the DMA map. */
   7449 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7450 		    BUS_DMASYNC_PREWRITE);
   7451 
   7452 		/* Initialize the first transmit descriptor. */
   7453 		nexttx = txq->txq_next;
   7454 		if (!do_csum) {
   7455 			/* setup a legacy descriptor */
   7456 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7457 			    dmamap->dm_segs[0].ds_addr);
   7458 			txq->txq_descs[nexttx].wtx_cmdlen =
   7459 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7460 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7461 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7462 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7463 			    NULL) {
   7464 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7465 				    htole32(WTX_CMD_VLE);
   7466 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7467 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7468 			} else {
    7469 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7470 			}
   7471 			dcmdlen = 0;
   7472 		} else {
   7473 			/* setup an advanced data descriptor */
   7474 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7475 			    htole64(dmamap->dm_segs[0].ds_addr);
   7476 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7477 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7478 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7479 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7480 			    htole32(fields);
   7481 			DPRINTF(WM_DEBUG_TX,
   7482 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7483 			    device_xname(sc->sc_dev), nexttx,
   7484 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7485 			DPRINTF(WM_DEBUG_TX,
   7486 			    ("\t 0x%08x%08x\n", fields,
   7487 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7488 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7489 		}
   7490 
   7491 		lasttx = nexttx;
   7492 		nexttx = WM_NEXTTX(txq, nexttx);
    7493 		/*
    7494 		 * Fill in the next descriptors. The legacy and advanced
    7495 		 * formats are the same here.
    7496 		 */
   7497 		for (seg = 1; seg < dmamap->dm_nsegs;
   7498 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7499 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7500 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7501 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7502 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7503 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7504 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7505 			lasttx = nexttx;
   7506 
   7507 			DPRINTF(WM_DEBUG_TX,
   7508 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7509 			     "len %#04zx\n",
   7510 			    device_xname(sc->sc_dev), nexttx,
   7511 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7512 			    dmamap->dm_segs[seg].ds_len));
   7513 		}
   7514 
   7515 		KASSERT(lasttx != -1);
   7516 
   7517 		/*
   7518 		 * Set up the command byte on the last descriptor of
   7519 		 * the packet.  If we're in the interrupt delay window,
   7520 		 * delay the interrupt.
   7521 		 */
   7522 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7523 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
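		/*
		 * The KASSERT above documents that the legacy and NEWQUEUE
		 * encodings of EOP and RS coincide, so the legacy view of
		 * the descriptor can set them for either format.
		 */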
   7524 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7525 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7526 
   7527 		txs->txs_lastdesc = lasttx;
   7528 
   7529 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7530 		    device_xname(sc->sc_dev),
   7531 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7532 
   7533 		/* Sync the descriptors we're using. */
   7534 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7535 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7536 
   7537 		/* Give the packet to the chip. */
   7538 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7539 		sent = true;
   7540 
   7541 		DPRINTF(WM_DEBUG_TX,
   7542 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7543 
   7544 		DPRINTF(WM_DEBUG_TX,
   7545 		    ("%s: TX: finished transmitting packet, job %d\n",
   7546 		    device_xname(sc->sc_dev), txq->txq_snext));
   7547 
   7548 		/* Advance the tx pointer. */
   7549 		txq->txq_free -= txs->txs_ndesc;
   7550 		txq->txq_next = nexttx;
   7551 
   7552 		txq->txq_sfree--;
   7553 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7554 
   7555 		/* Pass the packet to any BPF listeners. */
   7556 		bpf_mtap(ifp, m0);
   7557 	}
   7558 
   7559 	if (m0 != NULL) {
   7560 		if (!is_transmit)
   7561 			ifp->if_flags |= IFF_OACTIVE;
   7562 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7563 		WM_Q_EVCNT_INCR(txq, txdrop);
   7564 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7565 			__func__));
   7566 		m_freem(m0);
   7567 	}
   7568 
   7569 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7570 		/* No more slots; notify upper layer. */
   7571 		if (!is_transmit)
   7572 			ifp->if_flags |= IFF_OACTIVE;
   7573 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7574 	}
   7575 
   7576 	if (sent) {
   7577 		/* Set a watchdog timer in case the chip flakes out. */
   7578 		ifp->if_timer = 5;
   7579 	}
   7580 }
   7581 
   7582 static void
   7583 wm_deferred_start_locked(struct wm_txqueue *txq)
   7584 {
   7585 	struct wm_softc *sc = txq->txq_sc;
   7586 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7587 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7588 	int qid = wmq->wmq_id;
   7589 
   7590 	KASSERT(mutex_owned(txq->txq_lock));
   7591 
   7592 	if (txq->txq_stopping) {
   7593 		mutex_exit(txq->txq_lock);
   7594 		return;
   7595 	}
   7596 
   7597 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7598 		/* XXX needed for ALTQ */
   7599 		if (qid == 0)
   7600 			wm_nq_start_locked(ifp);
   7601 		wm_nq_transmit_locked(ifp, txq);
   7602 	} else {
    7603 		/* XXX needed for ALTQ */
   7604 		if (qid == 0)
   7605 			wm_start_locked(ifp);
   7606 		wm_transmit_locked(ifp, txq);
   7607 	}
   7608 }
   7609 
   7610 /* Interrupt */
   7611 
   7612 /*
   7613  * wm_txeof:
   7614  *
   7615  *	Helper; handle transmit interrupts.
   7616  */
   7617 static int
   7618 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7619 {
   7620 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7621 	struct wm_txsoft *txs;
   7622 	bool processed = false;
   7623 	int count = 0;
   7624 	int i;
   7625 	uint8_t status;
   7626 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7627 
   7628 	KASSERT(mutex_owned(txq->txq_lock));
   7629 
   7630 	if (txq->txq_stopping)
   7631 		return 0;
   7632 
   7633 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7634 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7635 	if (wmq->wmq_id == 0)
   7636 		ifp->if_flags &= ~IFF_OACTIVE;
   7637 
   7638 	/*
   7639 	 * Go through the Tx list and free mbufs for those
   7640 	 * frames which have been transmitted.
   7641 	 */
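	/*
	 * txq_sdirty indexes the oldest in-flight job; the loop stops at
	 * the first job whose last descriptor does not yet have DD set.
	 */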
   7642 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7643 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7644 		txs = &txq->txq_soft[i];
   7645 
   7646 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7647 			device_xname(sc->sc_dev), i));
   7648 
   7649 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7650 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7651 
   7652 		status =
   7653 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7654 		if ((status & WTX_ST_DD) == 0) {
   7655 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7656 			    BUS_DMASYNC_PREREAD);
   7657 			break;
   7658 		}
   7659 
   7660 		processed = true;
   7661 		count++;
   7662 		DPRINTF(WM_DEBUG_TX,
   7663 		    ("%s: TX: job %d done: descs %d..%d\n",
   7664 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7665 		    txs->txs_lastdesc));
   7666 
   7667 		/*
   7668 		 * XXX We should probably be using the statistics
   7669 		 * XXX registers, but I don't know if they exist
   7670 		 * XXX on chips before the i82544.
   7671 		 */
   7672 
   7673 #ifdef WM_EVENT_COUNTERS
   7674 		if (status & WTX_ST_TU)
   7675 			WM_Q_EVCNT_INCR(txq, tu);
   7676 #endif /* WM_EVENT_COUNTERS */
   7677 
   7678 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7679 			ifp->if_oerrors++;
   7680 			if (status & WTX_ST_LC)
   7681 				log(LOG_WARNING, "%s: late collision\n",
   7682 				    device_xname(sc->sc_dev));
   7683 			else if (status & WTX_ST_EC) {
   7684 				ifp->if_collisions += 16;
   7685 				log(LOG_WARNING, "%s: excessive collisions\n",
   7686 				    device_xname(sc->sc_dev));
   7687 			}
   7688 		} else
   7689 			ifp->if_opackets++;
   7690 
   7691 		txq->txq_packets++;
   7692 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7693 
   7694 		txq->txq_free += txs->txs_ndesc;
   7695 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7696 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7697 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7698 		m_freem(txs->txs_mbuf);
   7699 		txs->txs_mbuf = NULL;
   7700 	}
   7701 
   7702 	/* Update the dirty transmit buffer pointer. */
   7703 	txq->txq_sdirty = i;
   7704 	DPRINTF(WM_DEBUG_TX,
   7705 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7706 
   7707 	if (count != 0)
   7708 		rnd_add_uint32(&sc->rnd_source, count);
   7709 
   7710 	/*
   7711 	 * If there are no more pending transmissions, cancel the watchdog
   7712 	 * timer.
   7713 	 */
   7714 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7715 		ifp->if_timer = 0;
   7716 
   7717 	return processed;
   7718 }
   7719 
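/*
 * The accessors below hide the three RX descriptor layouts this driver
 * handles: the 82574's extended descriptors, the NEWQUEUE (82575 and
 * newer) advanced descriptors, and the legacy format for everything
 * else.
 */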
   7720 static inline uint32_t
   7721 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7722 {
   7723 	struct wm_softc *sc = rxq->rxq_sc;
   7724 
   7725 	if (sc->sc_type == WM_T_82574)
   7726 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7727 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7728 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7729 	else
   7730 		return rxq->rxq_descs[idx].wrx_status;
   7731 }
   7732 
   7733 static inline uint32_t
   7734 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7735 {
   7736 	struct wm_softc *sc = rxq->rxq_sc;
   7737 
   7738 	if (sc->sc_type == WM_T_82574)
   7739 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7740 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7741 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7742 	else
   7743 		return rxq->rxq_descs[idx].wrx_errors;
   7744 }
   7745 
   7746 static inline uint16_t
   7747 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7748 {
   7749 	struct wm_softc *sc = rxq->rxq_sc;
   7750 
   7751 	if (sc->sc_type == WM_T_82574)
   7752 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7753 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7754 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7755 	else
   7756 		return rxq->rxq_descs[idx].wrx_special;
   7757 }
   7758 
   7759 static inline int
   7760 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7761 {
   7762 	struct wm_softc *sc = rxq->rxq_sc;
   7763 
   7764 	if (sc->sc_type == WM_T_82574)
   7765 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7766 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7767 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7768 	else
   7769 		return rxq->rxq_descs[idx].wrx_len;
   7770 }
   7771 
   7772 #ifdef WM_DEBUG
   7773 static inline uint32_t
   7774 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7775 {
   7776 	struct wm_softc *sc = rxq->rxq_sc;
   7777 
   7778 	if (sc->sc_type == WM_T_82574)
   7779 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7780 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7781 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7782 	else
   7783 		return 0;
   7784 }
   7785 
   7786 static inline uint8_t
   7787 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7788 {
   7789 	struct wm_softc *sc = rxq->rxq_sc;
   7790 
   7791 	if (sc->sc_type == WM_T_82574)
   7792 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7793 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7794 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7795 	else
   7796 		return 0;
   7797 }
   7798 #endif /* WM_DEBUG */
   7799 
   7800 static inline bool
   7801 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7802     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7803 {
   7804 
   7805 	if (sc->sc_type == WM_T_82574)
   7806 		return (status & ext_bit) != 0;
   7807 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7808 		return (status & nq_bit) != 0;
   7809 	else
   7810 		return (status & legacy_bit) != 0;
   7811 }
   7812 
   7813 static inline bool
   7814 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7815     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7816 {
   7817 
   7818 	if (sc->sc_type == WM_T_82574)
   7819 		return (error & ext_bit) != 0;
   7820 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7821 		return (error & nq_bit) != 0;
   7822 	else
   7823 		return (error & legacy_bit) != 0;
   7824 }
   7825 
   7826 static inline bool
   7827 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7828 {
   7829 
   7830 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7831 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7832 		return true;
   7833 	else
   7834 		return false;
   7835 }
   7836 
   7837 static inline bool
   7838 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7839 {
   7840 	struct wm_softc *sc = rxq->rxq_sc;
   7841 
   7842 	/* XXXX missing error bit for newqueue? */
   7843 	if (wm_rxdesc_is_set_error(sc, errors,
   7844 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7845 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7846 		NQRXC_ERROR_RXE)) {
   7847 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7848 			log(LOG_WARNING, "%s: symbol error\n",
   7849 			    device_xname(sc->sc_dev));
   7850 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7851 			log(LOG_WARNING, "%s: receive sequence error\n",
   7852 			    device_xname(sc->sc_dev));
   7853 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7854 			log(LOG_WARNING, "%s: CRC error\n",
   7855 			    device_xname(sc->sc_dev));
   7856 		return true;
   7857 	}
   7858 
   7859 	return false;
   7860 }
   7861 
   7862 static inline bool
   7863 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7864 {
   7865 	struct wm_softc *sc = rxq->rxq_sc;
   7866 
   7867 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7868 		NQRXC_STATUS_DD)) {
   7869 		/* We have processed all of the receive descriptors. */
   7870 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7871 		return false;
   7872 	}
   7873 
   7874 	return true;
   7875 }
   7876 
   7877 static inline bool
wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
    uint16_t vlantag, struct mbuf *m)
   7880 {
   7881 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7882 
   7883 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7884 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7885 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7886 	}
   7887 
   7888 	return true;
   7889 }
   7890 
   7891 static inline void
   7892 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7893     uint32_t errors, struct mbuf *m)
   7894 {
   7895 	struct wm_softc *sc = rxq->rxq_sc;
   7896 
   7897 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7898 		if (wm_rxdesc_is_set_status(sc, status,
   7899 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7900 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7901 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7902 			if (wm_rxdesc_is_set_error(sc, errors,
   7903 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7904 				m->m_pkthdr.csum_flags |=
   7905 					M_CSUM_IPv4_BAD;
   7906 		}
   7907 		if (wm_rxdesc_is_set_status(sc, status,
   7908 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7909 			/*
   7910 			 * Note: we don't know if this was TCP or UDP,
   7911 			 * so we just set both bits, and expect the
   7912 			 * upper layers to deal.
   7913 			 */
   7914 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7915 			m->m_pkthdr.csum_flags |=
   7916 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7917 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7918 			if (wm_rxdesc_is_set_error(sc, errors,
   7919 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7920 				m->m_pkthdr.csum_flags |=
   7921 					M_CSUM_TCP_UDP_BAD;
   7922 		}
   7923 	}
   7924 }
   7925 
   7926 /*
   7927  * wm_rxeof:
   7928  *
   7929  *	Helper; handle receive interrupts.
   7930  */
   7931 static void
   7932 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   7933 {
   7934 	struct wm_softc *sc = rxq->rxq_sc;
   7935 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7936 	struct wm_rxsoft *rxs;
   7937 	struct mbuf *m;
   7938 	int i, len;
   7939 	int count = 0;
   7940 	uint32_t status, errors;
   7941 	uint16_t vlantag;
   7942 
   7943 	KASSERT(mutex_owned(rxq->rxq_lock));
   7944 
   7945 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7946 		if (limit-- == 0) {
   7947 			rxq->rxq_ptr = i;
   7948 			break;
   7949 		}
   7950 
   7951 		rxs = &rxq->rxq_soft[i];
   7952 
   7953 		DPRINTF(WM_DEBUG_RX,
   7954 		    ("%s: RX: checking descriptor %d\n",
   7955 		    device_xname(sc->sc_dev), i));
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7957 
   7958 		status = wm_rxdesc_get_status(rxq, i);
   7959 		errors = wm_rxdesc_get_errors(rxq, i);
   7960 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7961 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7962 #ifdef WM_DEBUG
   7963 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   7964 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7965 #endif
   7966 
   7967 		if (!wm_rxdesc_dd(rxq, i, status)) {
   7968 			/*
   7969 			 * Update the receive pointer holding rxq_lock
   7970 			 * consistent with increment counter.
   7971 			 */
   7972 			rxq->rxq_ptr = i;
   7973 			break;
   7974 		}
   7975 
   7976 		count++;
   7977 		if (__predict_false(rxq->rxq_discard)) {
   7978 			DPRINTF(WM_DEBUG_RX,
   7979 			    ("%s: RX: discarding contents of descriptor %d\n",
   7980 			    device_xname(sc->sc_dev), i));
   7981 			wm_init_rxdesc(rxq, i);
   7982 			if (wm_rxdesc_is_eop(rxq, status)) {
   7983 				/* Reset our state. */
   7984 				DPRINTF(WM_DEBUG_RX,
   7985 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7986 				    device_xname(sc->sc_dev)));
   7987 				rxq->rxq_discard = 0;
   7988 			}
   7989 			continue;
   7990 		}
   7991 
   7992 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7993 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7994 
   7995 		m = rxs->rxs_mbuf;
   7996 
   7997 		/*
   7998 		 * Add a new receive buffer to the ring, unless of
   7999 		 * course the length is zero. Treat the latter as a
   8000 		 * failed mapping.
   8001 		 */
   8002 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8003 			/*
   8004 			 * Failed, throw away what we've done so
   8005 			 * far, and discard the rest of the packet.
   8006 			 */
   8007 			ifp->if_ierrors++;
   8008 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8009 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8010 			wm_init_rxdesc(rxq, i);
   8011 			if (!wm_rxdesc_is_eop(rxq, status))
   8012 				rxq->rxq_discard = 1;
   8013 			if (rxq->rxq_head != NULL)
   8014 				m_freem(rxq->rxq_head);
   8015 			WM_RXCHAIN_RESET(rxq);
   8016 			DPRINTF(WM_DEBUG_RX,
   8017 			    ("%s: RX: Rx buffer allocation failed, "
   8018 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8019 			    rxq->rxq_discard ? " (discard)" : ""));
   8020 			continue;
   8021 		}
   8022 
   8023 		m->m_len = len;
   8024 		rxq->rxq_len += len;
   8025 		DPRINTF(WM_DEBUG_RX,
   8026 		    ("%s: RX: buffer at %p len %d\n",
   8027 		    device_xname(sc->sc_dev), m->m_data, len));
   8028 
   8029 		/* If this is not the end of the packet, keep looking. */
   8030 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8031 			WM_RXCHAIN_LINK(rxq, m);
   8032 			DPRINTF(WM_DEBUG_RX,
   8033 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8034 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8035 			continue;
   8036 		}
   8037 
   8038 		/*
   8039 		 * Okay, we have the entire packet now.  The chip is
   8040 		 * configured to include the FCS except I350 and I21[01]
   8041 		 * (not all chips can be configured to strip it),
   8042 		 * so we need to trim it.
   8043 		 * May need to adjust length of previous mbuf in the
   8044 		 * chain if the current mbuf is too short.
   8045 		 * For an eratta, the RCTL_SECRC bit in RCTL register
   8046 		 * is always set in I350, so we don't trim it.
   8047 		 */
   8048 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8049 		    && (sc->sc_type != WM_T_I210)
   8050 		    && (sc->sc_type != WM_T_I211)) {
   8051 			if (m->m_len < ETHER_CRC_LEN) {
   8052 				rxq->rxq_tail->m_len
   8053 				    -= (ETHER_CRC_LEN - m->m_len);
   8054 				m->m_len = 0;
   8055 			} else
   8056 				m->m_len -= ETHER_CRC_LEN;
   8057 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8058 		} else
   8059 			len = rxq->rxq_len;
   8060 
   8061 		WM_RXCHAIN_LINK(rxq, m);
   8062 
   8063 		*rxq->rxq_tailp = NULL;
   8064 		m = rxq->rxq_head;
   8065 
   8066 		WM_RXCHAIN_RESET(rxq);
   8067 
   8068 		DPRINTF(WM_DEBUG_RX,
   8069 		    ("%s: RX: have entire packet, len -> %d\n",
   8070 		    device_xname(sc->sc_dev), len));
   8071 
   8072 		/* If an error occurred, update stats and drop the packet. */
   8073 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8074 			m_freem(m);
   8075 			continue;
   8076 		}
   8077 
   8078 		/* No errors.  Receive the packet. */
   8079 		m_set_rcvif(m, ifp);
   8080 		m->m_pkthdr.len = len;
   8081 		/*
   8082 		 * TODO
		 * We should save the rsshash and rsstype in this mbuf.
   8084 		 */
   8085 		DPRINTF(WM_DEBUG_RX,
   8086 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8087 			device_xname(sc->sc_dev), rsstype, rsshash));
   8088 
   8089 		/*
   8090 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8091 		 * for us.  Associate the tag with the packet.
   8092 		 */
   8093 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8094 			continue;
   8095 
   8096 		/* Set up checksum info for this packet. */
   8097 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8098 		/*
   8099 		 * Update the receive pointer holding rxq_lock consistent with
   8100 		 * increment counter.
   8101 		 */
   8102 		rxq->rxq_ptr = i;
   8103 		rxq->rxq_packets++;
   8104 		rxq->rxq_bytes += len;
   8105 		mutex_exit(rxq->rxq_lock);
   8106 
   8107 		/* Pass it on. */
   8108 		if_percpuq_enqueue(sc->sc_ipq, m);
   8109 
   8110 		mutex_enter(rxq->rxq_lock);
   8111 
   8112 		if (rxq->rxq_stopping)
   8113 			break;
   8114 	}
   8115 
   8116 	if (count != 0)
   8117 		rnd_add_uint32(&sc->rnd_source, count);
   8118 
   8119 	DPRINTF(WM_DEBUG_RX,
   8120 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8121 }
   8122 
   8123 /*
   8124  * wm_linkintr_gmii:
   8125  *
   8126  *	Helper; handle link interrupts for GMII.
   8127  */
   8128 static void
   8129 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8130 {
   8131 
   8132 	KASSERT(WM_CORE_LOCKED(sc));
   8133 
   8134 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8135 		__func__));
   8136 
   8137 	if (icr & ICR_LSC) {
   8138 		uint32_t reg;
   8139 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8140 
   8141 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8142 			wm_gig_downshift_workaround_ich8lan(sc);
   8143 
   8144 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8145 			device_xname(sc->sc_dev)));
   8146 		mii_pollstat(&sc->sc_mii);
   8147 		if (sc->sc_type == WM_T_82543) {
   8148 			int miistatus, active;
   8149 
   8150 			/*
			 * On the 82543 we need to force the MAC's
			 * speed and duplex to match the PHY's speed
			 * and duplex configuration.
   8154 			 */
   8155 			miistatus = sc->sc_mii.mii_media_status;
   8156 
   8157 			if (miistatus & IFM_ACTIVE) {
   8158 				active = sc->sc_mii.mii_media_active;
   8159 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8160 				switch (IFM_SUBTYPE(active)) {
   8161 				case IFM_10_T:
   8162 					sc->sc_ctrl |= CTRL_SPEED_10;
   8163 					break;
   8164 				case IFM_100_TX:
   8165 					sc->sc_ctrl |= CTRL_SPEED_100;
   8166 					break;
   8167 				case IFM_1000_T:
   8168 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8169 					break;
   8170 				default:
   8171 					/*
					 * Fiber?
					 * Should not enter here.
   8174 					 */
   8175 					printf("unknown media (%x)\n", active);
   8176 					break;
   8177 				}
   8178 				if (active & IFM_FDX)
   8179 					sc->sc_ctrl |= CTRL_FD;
   8180 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8181 			}
   8182 		} else if ((sc->sc_type == WM_T_ICH8)
   8183 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8184 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8185 		} else if (sc->sc_type == WM_T_PCH) {
   8186 			wm_k1_gig_workaround_hv(sc,
   8187 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8188 		}
   8189 
   8190 		if ((sc->sc_phytype == WMPHY_82578)
   8191 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8192 			== IFM_1000_T)) {
   8193 
   8194 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8195 				delay(200*1000); /* XXX too big */
   8196 
   8197 				/* Link stall fix for link up */
   8198 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8199 				    HV_MUX_DATA_CTRL,
   8200 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8201 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8202 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8203 				    HV_MUX_DATA_CTRL,
   8204 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8205 			}
   8206 		}
   8207 		/*
   8208 		 * I217 Packet Loss issue:
   8209 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8210 		 * on power up.
   8211 		 * Set the Beacon Duration for I217 to 8 usec
   8212 		 */
   8213 		if ((sc->sc_type == WM_T_PCH_LPT)
   8214 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8215 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8216 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8217 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8218 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8219 		}
   8220 
   8221 		/* XXX Work-around I218 hang issue */
   8222 		/* e1000_k1_workaround_lpt_lp() */
   8223 
   8224 		if ((sc->sc_type == WM_T_PCH_LPT)
   8225 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8226 			/*
   8227 			 * Set platform power management values for Latency
   8228 			 * Tolerance Reporting (LTR)
   8229 			 */
   8230 			wm_platform_pm_pch_lpt(sc,
   8231 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8232 				    != 0));
   8233 		}
   8234 
   8235 		/* FEXTNVM6 K1-off workaround */
   8236 		if (sc->sc_type == WM_T_PCH_SPT) {
   8237 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8238 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8239 			    & FEXTNVM6_K1_OFF_ENABLE)
   8240 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8241 			else
   8242 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8243 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8244 		}
   8245 	} else if (icr & ICR_RXSEQ) {
   8246 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8247 			device_xname(sc->sc_dev)));
   8248 	}
   8249 }
   8250 
   8251 /*
   8252  * wm_linkintr_tbi:
   8253  *
   8254  *	Helper; handle link interrupts for TBI mode.
   8255  */
   8256 static void
   8257 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8258 {
   8259 	uint32_t status;
   8260 
   8261 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8262 		__func__));
   8263 
   8264 	status = CSR_READ(sc, WMREG_STATUS);
   8265 	if (icr & ICR_LSC) {
   8266 		if (status & STATUS_LU) {
   8267 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8268 			    device_xname(sc->sc_dev),
   8269 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8270 			/*
   8271 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8272 			 * so we should update sc->sc_ctrl
   8273 			 */
   8274 
   8275 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8276 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8277 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8278 			if (status & STATUS_FD)
   8279 				sc->sc_tctl |=
   8280 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8281 			else
   8282 				sc->sc_tctl |=
   8283 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8284 			if (sc->sc_ctrl & CTRL_TFCE)
   8285 				sc->sc_fcrtl |= FCRTL_XONE;
   8286 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8287 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8288 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8289 				      sc->sc_fcrtl);
   8290 			sc->sc_tbi_linkup = 1;
   8291 		} else {
   8292 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8293 			    device_xname(sc->sc_dev)));
   8294 			sc->sc_tbi_linkup = 0;
   8295 		}
   8296 		/* Update LED */
   8297 		wm_tbi_serdes_set_linkled(sc);
   8298 	} else if (icr & ICR_RXSEQ) {
   8299 		DPRINTF(WM_DEBUG_LINK,
   8300 		    ("%s: LINK: Receive sequence error\n",
   8301 		    device_xname(sc->sc_dev)));
   8302 	}
   8303 }
   8304 
   8305 /*
   8306  * wm_linkintr_serdes:
   8307  *
 *	Helper; handle link interrupts for SERDES mode.
   8309  */
   8310 static void
   8311 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8312 {
   8313 	struct mii_data *mii = &sc->sc_mii;
   8314 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8315 	uint32_t pcs_adv, pcs_lpab, reg;
   8316 
   8317 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8318 		__func__));
   8319 
   8320 	if (icr & ICR_LSC) {
   8321 		/* Check PCS */
   8322 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8323 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8324 			mii->mii_media_status |= IFM_ACTIVE;
   8325 			sc->sc_tbi_linkup = 1;
   8326 		} else {
   8327 			mii->mii_media_status |= IFM_NONE;
   8328 			sc->sc_tbi_linkup = 0;
   8329 			wm_tbi_serdes_set_linkled(sc);
   8330 			return;
   8331 		}
   8332 		mii->mii_media_active |= IFM_1000_SX;
   8333 		if ((reg & PCS_LSTS_FDX) != 0)
   8334 			mii->mii_media_active |= IFM_FDX;
   8335 		else
   8336 			mii->mii_media_active |= IFM_HDX;
   8337 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8338 			/* Check flow */
   8339 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8340 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8341 				DPRINTF(WM_DEBUG_LINK,
   8342 				    ("XXX LINKOK but not ACOMP\n"));
   8343 				return;
   8344 			}
   8345 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8346 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8347 			DPRINTF(WM_DEBUG_LINK,
   8348 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8349 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8350 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8351 				mii->mii_media_active |= IFM_FLOW
   8352 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8353 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8354 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8355 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8356 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8357 				mii->mii_media_active |= IFM_FLOW
   8358 				    | IFM_ETH_TXPAUSE;
   8359 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8360 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8361 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8362 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8363 				mii->mii_media_active |= IFM_FLOW
   8364 				    | IFM_ETH_RXPAUSE;
   8365 		}
   8366 		/* Update LED */
   8367 		wm_tbi_serdes_set_linkled(sc);
   8368 	} else {
   8369 		DPRINTF(WM_DEBUG_LINK,
   8370 		    ("%s: LINK: Receive sequence error\n",
   8371 		    device_xname(sc->sc_dev)));
   8372 	}
   8373 }
   8374 
   8375 /*
   8376  * wm_linkintr:
   8377  *
   8378  *	Helper; handle link interrupts.
   8379  */
   8380 static void
   8381 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8382 {
   8383 
   8384 	KASSERT(WM_CORE_LOCKED(sc));
   8385 
   8386 	if (sc->sc_flags & WM_F_HAS_MII)
   8387 		wm_linkintr_gmii(sc, icr);
   8388 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8389 	    && (sc->sc_type >= WM_T_82575))
   8390 		wm_linkintr_serdes(sc, icr);
   8391 	else
   8392 		wm_linkintr_tbi(sc, icr);
   8393 }
   8394 
   8395 /*
   8396  * wm_intr_legacy:
   8397  *
   8398  *	Interrupt service routine for INTx and MSI.
   8399  */
   8400 static int
   8401 wm_intr_legacy(void *arg)
   8402 {
   8403 	struct wm_softc *sc = arg;
   8404 	struct wm_queue *wmq = &sc->sc_queue[0];
   8405 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8406 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8407 	uint32_t icr, rndval = 0;
   8408 	int handled = 0;
   8409 
   8410 	DPRINTF(WM_DEBUG_TX,
   8411 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8412 	while (1 /* CONSTCOND */) {
   8413 		icr = CSR_READ(sc, WMREG_ICR);
   8414 		if ((icr & sc->sc_icr) == 0)
   8415 			break;
   8416 		if (rndval == 0)
   8417 			rndval = icr;
   8418 
   8419 		mutex_enter(rxq->rxq_lock);
   8420 
   8421 		if (rxq->rxq_stopping) {
   8422 			mutex_exit(rxq->rxq_lock);
   8423 			break;
   8424 		}
   8425 
   8426 		handled = 1;
   8427 
   8428 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8429 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8430 			DPRINTF(WM_DEBUG_RX,
   8431 			    ("%s: RX: got Rx intr 0x%08x\n",
   8432 			    device_xname(sc->sc_dev),
   8433 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8434 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8435 		}
   8436 #endif
   8437 		wm_rxeof(rxq, UINT_MAX);
   8438 
   8439 		mutex_exit(rxq->rxq_lock);
   8440 		mutex_enter(txq->txq_lock);
   8441 
   8442 		if (txq->txq_stopping) {
   8443 			mutex_exit(txq->txq_lock);
   8444 			break;
   8445 		}
   8446 
   8447 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8448 		if (icr & ICR_TXDW) {
   8449 			DPRINTF(WM_DEBUG_TX,
   8450 			    ("%s: TX: got TXDW interrupt\n",
   8451 			    device_xname(sc->sc_dev)));
   8452 			WM_Q_EVCNT_INCR(txq, txdw);
   8453 		}
   8454 #endif
   8455 		wm_txeof(sc, txq);
   8456 
   8457 		mutex_exit(txq->txq_lock);
   8458 		WM_CORE_LOCK(sc);
   8459 
   8460 		if (sc->sc_core_stopping) {
   8461 			WM_CORE_UNLOCK(sc);
   8462 			break;
   8463 		}
   8464 
   8465 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8466 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8467 			wm_linkintr(sc, icr);
   8468 		}
   8469 
   8470 		WM_CORE_UNLOCK(sc);
   8471 
   8472 		if (icr & ICR_RXO) {
   8473 #if defined(WM_DEBUG)
   8474 			log(LOG_WARNING, "%s: Receive overrun\n",
   8475 			    device_xname(sc->sc_dev));
   8476 #endif /* defined(WM_DEBUG) */
   8477 		}
   8478 	}
   8479 
   8480 	rnd_add_uint32(&sc->rnd_source, rndval);
   8481 
   8482 	if (handled) {
   8483 		/* Try to get more packets going. */
   8484 		softint_schedule(wmq->wmq_si);
   8485 	}
   8486 
   8487 	return handled;
   8488 }
   8489 
   8490 static inline void
   8491 wm_txrxintr_disable(struct wm_queue *wmq)
   8492 {
   8493 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8494 
   8495 	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8499 	else
   8500 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8501 }
   8502 
   8503 static inline void
   8504 wm_txrxintr_enable(struct wm_queue *wmq)
   8505 {
   8506 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8507 
   8508 	wm_itrs_calculate(sc, wmq);
   8509 
   8510 	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8514 	else
   8515 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8516 }
   8517 
   8518 static int
   8519 wm_txrxintr_msix(void *arg)
   8520 {
   8521 	struct wm_queue *wmq = arg;
   8522 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8523 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8524 	struct wm_softc *sc = txq->txq_sc;
   8525 	u_int limit = sc->sc_rx_intr_process_limit;
   8526 
   8527 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8528 
   8529 	DPRINTF(WM_DEBUG_TX,
   8530 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8531 
   8532 	wm_txrxintr_disable(wmq);
   8533 
   8534 	mutex_enter(txq->txq_lock);
   8535 
   8536 	if (txq->txq_stopping) {
   8537 		mutex_exit(txq->txq_lock);
   8538 		return 0;
   8539 	}
   8540 
   8541 	WM_Q_EVCNT_INCR(txq, txdw);
   8542 	wm_txeof(sc, txq);
	/* wm_deferred_start() is done in wm_handle_queue(). */
   8544 	mutex_exit(txq->txq_lock);
   8545 
   8546 	DPRINTF(WM_DEBUG_RX,
   8547 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8548 	mutex_enter(rxq->rxq_lock);
   8549 
   8550 	if (rxq->rxq_stopping) {
   8551 		mutex_exit(rxq->rxq_lock);
   8552 		return 0;
   8553 	}
   8554 
   8555 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8556 	wm_rxeof(rxq, limit);
   8557 	mutex_exit(rxq->rxq_lock);
   8558 
   8559 	wm_itrs_writereg(sc, wmq);
   8560 
   8561 	softint_schedule(wmq->wmq_si);
   8562 
   8563 	return 1;
   8564 }
   8565 
   8566 static void
   8567 wm_handle_queue(void *arg)
   8568 {
   8569 	struct wm_queue *wmq = arg;
   8570 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8571 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8572 	struct wm_softc *sc = txq->txq_sc;
   8573 	u_int limit = sc->sc_rx_process_limit;
   8574 
   8575 	mutex_enter(txq->txq_lock);
   8576 	if (txq->txq_stopping) {
   8577 		mutex_exit(txq->txq_lock);
   8578 		return;
   8579 	}
   8580 	wm_txeof(sc, txq);
   8581 	wm_deferred_start_locked(txq);
   8582 	mutex_exit(txq->txq_lock);
   8583 
   8584 	mutex_enter(rxq->rxq_lock);
   8585 	if (rxq->rxq_stopping) {
   8586 		mutex_exit(rxq->rxq_lock);
   8587 		return;
   8588 	}
   8589 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8590 	wm_rxeof(rxq, limit);
   8591 	mutex_exit(rxq->rxq_lock);
   8592 
   8593 	wm_txrxintr_enable(wmq);
   8594 }
   8595 
   8596 /*
   8597  * wm_linkintr_msix:
   8598  *
   8599  *	Interrupt service routine for link status change for MSI-X.
   8600  */
   8601 static int
   8602 wm_linkintr_msix(void *arg)
   8603 {
   8604 	struct wm_softc *sc = arg;
   8605 	uint32_t reg;
   8606 
   8607 	DPRINTF(WM_DEBUG_LINK,
   8608 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8609 
   8610 	reg = CSR_READ(sc, WMREG_ICR);
   8611 	WM_CORE_LOCK(sc);
   8612 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8613 		goto out;
   8614 
   8615 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8616 	wm_linkintr(sc, ICR_LSC);
   8617 
   8618 out:
   8619 	WM_CORE_UNLOCK(sc);
   8620 
   8621 	if (sc->sc_type == WM_T_82574)
   8622 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8623 	else if (sc->sc_type == WM_T_82575)
   8624 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8625 	else
   8626 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8627 
   8628 	return 1;
   8629 }
   8630 
   8631 /*
   8632  * Media related.
   8633  * GMII, SGMII, TBI (and SERDES)
   8634  */
   8635 
   8636 /* Common */
   8637 
   8638 /*
   8639  * wm_tbi_serdes_set_linkled:
   8640  *
   8641  *	Update the link LED on TBI and SERDES devices.
   8642  */
   8643 static void
   8644 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8645 {
   8646 
   8647 	if (sc->sc_tbi_linkup)
   8648 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8649 	else
   8650 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8651 
   8652 	/* 82540 or newer devices are active low */
   8653 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8654 
   8655 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8656 }
   8657 
   8658 /* GMII related */
   8659 
   8660 /*
   8661  * wm_gmii_reset:
   8662  *
   8663  *	Reset the PHY.
   8664  */
   8665 static void
   8666 wm_gmii_reset(struct wm_softc *sc)
   8667 {
   8668 	uint32_t reg;
   8669 	int rv;
   8670 
   8671 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8672 		device_xname(sc->sc_dev), __func__));
   8673 
   8674 	rv = sc->phy.acquire(sc);
   8675 	if (rv != 0) {
   8676 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8677 		    __func__);
   8678 		return;
   8679 	}
   8680 
   8681 	switch (sc->sc_type) {
   8682 	case WM_T_82542_2_0:
   8683 	case WM_T_82542_2_1:
   8684 		/* null */
   8685 		break;
   8686 	case WM_T_82543:
   8687 		/*
		 * On the 82543 we need to force the MAC's speed and duplex
		 * to match the PHY's speed and duplex configuration.
   8690 		 * In addition, we need to perform a hardware reset on the PHY
   8691 		 * to take it out of reset.
   8692 		 */
   8693 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8694 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8695 
   8696 		/* The PHY reset pin is active-low. */
   8697 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8698 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8699 		    CTRL_EXT_SWDPIN(4));
   8700 		reg |= CTRL_EXT_SWDPIO(4);
   8701 
   8702 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8703 		CSR_WRITE_FLUSH(sc);
   8704 		delay(10*1000);
   8705 
   8706 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8707 		CSR_WRITE_FLUSH(sc);
   8708 		delay(150);
   8709 #if 0
   8710 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8711 #endif
   8712 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8713 		break;
   8714 	case WM_T_82544:	/* reset 10000us */
   8715 	case WM_T_82540:
   8716 	case WM_T_82545:
   8717 	case WM_T_82545_3:
   8718 	case WM_T_82546:
   8719 	case WM_T_82546_3:
   8720 	case WM_T_82541:
   8721 	case WM_T_82541_2:
   8722 	case WM_T_82547:
   8723 	case WM_T_82547_2:
   8724 	case WM_T_82571:	/* reset 100us */
   8725 	case WM_T_82572:
   8726 	case WM_T_82573:
   8727 	case WM_T_82574:
   8728 	case WM_T_82575:
   8729 	case WM_T_82576:
   8730 	case WM_T_82580:
   8731 	case WM_T_I350:
   8732 	case WM_T_I354:
   8733 	case WM_T_I210:
   8734 	case WM_T_I211:
   8735 	case WM_T_82583:
   8736 	case WM_T_80003:
   8737 		/* generic reset */
   8738 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8739 		CSR_WRITE_FLUSH(sc);
   8740 		delay(20000);
   8741 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8742 		CSR_WRITE_FLUSH(sc);
   8743 		delay(20000);
   8744 
   8745 		if ((sc->sc_type == WM_T_82541)
   8746 		    || (sc->sc_type == WM_T_82541_2)
   8747 		    || (sc->sc_type == WM_T_82547)
   8748 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset(). */
   8750 			/* XXX add code to set LED after phy reset */
   8751 		}
   8752 		break;
   8753 	case WM_T_ICH8:
   8754 	case WM_T_ICH9:
   8755 	case WM_T_ICH10:
   8756 	case WM_T_PCH:
   8757 	case WM_T_PCH2:
   8758 	case WM_T_PCH_LPT:
   8759 	case WM_T_PCH_SPT:
   8760 		/* generic reset */
   8761 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8762 		CSR_WRITE_FLUSH(sc);
   8763 		delay(100);
   8764 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8765 		CSR_WRITE_FLUSH(sc);
   8766 		delay(150);
   8767 		break;
   8768 	default:
   8769 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8770 		    __func__);
   8771 		break;
   8772 	}
   8773 
   8774 	sc->phy.release(sc);
   8775 
   8776 	/* get_cfg_done */
   8777 	wm_get_cfg_done(sc);
   8778 
   8779 	/* extra setup */
   8780 	switch (sc->sc_type) {
   8781 	case WM_T_82542_2_0:
   8782 	case WM_T_82542_2_1:
   8783 	case WM_T_82543:
   8784 	case WM_T_82544:
   8785 	case WM_T_82540:
   8786 	case WM_T_82545:
   8787 	case WM_T_82545_3:
   8788 	case WM_T_82546:
   8789 	case WM_T_82546_3:
   8790 	case WM_T_82541_2:
   8791 	case WM_T_82547_2:
   8792 	case WM_T_82571:
   8793 	case WM_T_82572:
   8794 	case WM_T_82573:
   8795 	case WM_T_82575:
   8796 	case WM_T_82576:
   8797 	case WM_T_82580:
   8798 	case WM_T_I350:
   8799 	case WM_T_I354:
   8800 	case WM_T_I210:
   8801 	case WM_T_I211:
   8802 	case WM_T_80003:
   8803 		/* null */
   8804 		break;
   8805 	case WM_T_82574:
   8806 	case WM_T_82583:
   8807 		wm_lplu_d0_disable(sc);
   8808 		break;
   8809 	case WM_T_82541:
   8810 	case WM_T_82547:
   8811 		/* XXX Configure actively LED after PHY reset */
   8812 		break;
   8813 	case WM_T_ICH8:
   8814 	case WM_T_ICH9:
   8815 	case WM_T_ICH10:
   8816 	case WM_T_PCH:
   8817 	case WM_T_PCH2:
   8818 	case WM_T_PCH_LPT:
   8819 	case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
   8821 		delay(10*1000);
   8822 
   8823 		if (sc->sc_type == WM_T_PCH)
   8824 			wm_hv_phy_workaround_ich8lan(sc);
   8825 
   8826 		if (sc->sc_type == WM_T_PCH2)
   8827 			wm_lv_phy_workaround_ich8lan(sc);
   8828 
   8829 		/* Clear the host wakeup bit after lcd reset */
   8830 		if (sc->sc_type >= WM_T_PCH) {
   8831 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8832 			    BM_PORT_GEN_CFG);
   8833 			reg &= ~BM_WUC_HOST_WU_BIT;
   8834 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8835 			    BM_PORT_GEN_CFG, reg);
   8836 		}
   8837 
   8838 		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM.
   8841 		 */
   8842 
   8843 		/* Disable D0 LPLU. */
   8844 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8845 			wm_lplu_d0_disable_pch(sc);
   8846 		else
   8847 			wm_lplu_d0_disable(sc);	/* ICH* */
   8848 		break;
   8849 	default:
   8850 		panic("%s: unknown type\n", __func__);
   8851 		break;
   8852 	}
   8853 }
   8854 
   8855 /*
   8856  * Setup sc_phytype and mii_{read|write}reg.
   8857  *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and to select the correct read/write functions, the PCI ID
 * or MAC type is needed without accessing any PHY register.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * the PCI ID or MAC type is checked. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. The comparison table may lack entries, so the result may
 * still be imperfect, but it is better than that of the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
   8872  */
   8873 static void
   8874 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8875     uint16_t phy_model)
   8876 {
   8877 	device_t dev = sc->sc_dev;
   8878 	struct mii_data *mii = &sc->sc_mii;
   8879 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8880 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8881 	mii_readreg_t new_readreg;
   8882 	mii_writereg_t new_writereg;
   8883 
   8884 	if (mii->mii_readreg == NULL) {
   8885 		/*
   8886 		 *  This is the first call of this function. For ICH and PCH
   8887 		 * variants, it's difficult to determine the PHY access method
   8888 		 * by sc_type, so use the PCI product ID for some devices.
   8889 		 */
   8890 
   8891 		switch (sc->sc_pcidevid) {
   8892 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8893 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8894 			/* 82577 */
   8895 			new_phytype = WMPHY_82577;
   8896 			break;
   8897 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8898 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8899 			/* 82578 */
   8900 			new_phytype = WMPHY_82578;
   8901 			break;
   8902 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8903 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8904 			/* 82579 */
   8905 			new_phytype = WMPHY_82579;
   8906 			break;
   8907 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8908 		case PCI_PRODUCT_INTEL_82801I_BM:
   8909 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8910 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8911 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8912 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8913 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8914 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8915 			/* ICH8, 9, 10 with 82567 */
   8916 			new_phytype = WMPHY_BM;
   8917 			break;
   8918 		default:
   8919 			break;
   8920 		}
   8921 	} else {
   8922 		/* It's not the first call. Use PHY OUI and model */
   8923 		switch (phy_oui) {
   8924 		case MII_OUI_ATHEROS: /* XXX ??? */
   8925 			switch (phy_model) {
   8926 			case 0x0004: /* XXX */
   8927 				new_phytype = WMPHY_82578;
   8928 				break;
   8929 			default:
   8930 				break;
   8931 			}
   8932 			break;
   8933 		case MII_OUI_xxMARVELL:
   8934 			switch (phy_model) {
   8935 			case MII_MODEL_xxMARVELL_I210:
   8936 				new_phytype = WMPHY_I210;
   8937 				break;
   8938 			case MII_MODEL_xxMARVELL_E1011:
   8939 			case MII_MODEL_xxMARVELL_E1000_3:
   8940 			case MII_MODEL_xxMARVELL_E1000_5:
   8941 			case MII_MODEL_xxMARVELL_E1112:
   8942 				new_phytype = WMPHY_M88;
   8943 				break;
   8944 			case MII_MODEL_xxMARVELL_E1149:
   8945 				new_phytype = WMPHY_BM;
   8946 				break;
   8947 			case MII_MODEL_xxMARVELL_E1111:
   8948 			case MII_MODEL_xxMARVELL_I347:
   8949 			case MII_MODEL_xxMARVELL_E1512:
   8950 			case MII_MODEL_xxMARVELL_E1340M:
   8951 			case MII_MODEL_xxMARVELL_E1543:
   8952 				new_phytype = WMPHY_M88;
   8953 				break;
   8954 			case MII_MODEL_xxMARVELL_I82563:
   8955 				new_phytype = WMPHY_GG82563;
   8956 				break;
   8957 			default:
   8958 				break;
   8959 			}
   8960 			break;
   8961 		case MII_OUI_INTEL:
   8962 			switch (phy_model) {
   8963 			case MII_MODEL_INTEL_I82577:
   8964 				new_phytype = WMPHY_82577;
   8965 				break;
   8966 			case MII_MODEL_INTEL_I82579:
   8967 				new_phytype = WMPHY_82579;
   8968 				break;
   8969 			case MII_MODEL_INTEL_I217:
   8970 				new_phytype = WMPHY_I217;
   8971 				break;
   8972 			case MII_MODEL_INTEL_I82580:
   8973 			case MII_MODEL_INTEL_I350:
   8974 				new_phytype = WMPHY_82580;
   8975 				break;
   8976 			default:
   8977 				break;
   8978 			}
   8979 			break;
   8980 		case MII_OUI_yyINTEL:
   8981 			switch (phy_model) {
   8982 			case MII_MODEL_yyINTEL_I82562G:
   8983 			case MII_MODEL_yyINTEL_I82562EM:
   8984 			case MII_MODEL_yyINTEL_I82562ET:
   8985 				new_phytype = WMPHY_IFE;
   8986 				break;
   8987 			case MII_MODEL_yyINTEL_IGP01E1000:
   8988 				new_phytype = WMPHY_IGP;
   8989 				break;
   8990 			case MII_MODEL_yyINTEL_I82566:
   8991 				new_phytype = WMPHY_IGP_3;
   8992 				break;
   8993 			default:
   8994 				break;
   8995 			}
   8996 			break;
   8997 		default:
   8998 			break;
   8999 		}
   9000 		if (new_phytype == WMPHY_UNKNOWN)
   9001 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9002 			    __func__);
   9003 
   9004 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
			    " was incorrect. PHY type from PHY ID = %u\n",
   9008 			    sc->sc_phytype, new_phytype);
   9009 		}
   9010 	}
   9011 
   9012 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9013 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9014 		/* SGMII */
   9015 		new_readreg = wm_sgmii_readreg;
   9016 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   9018 		/* BM2 (phyaddr == 1) */
   9019 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9020 		    && (new_phytype != WMPHY_BM)
   9021 		    && (new_phytype != WMPHY_UNKNOWN))
   9022 			doubt_phytype = new_phytype;
   9023 		new_phytype = WMPHY_BM;
   9024 		new_readreg = wm_gmii_bm_readreg;
   9025 		new_writereg = wm_gmii_bm_writereg;
   9026 	} else if (sc->sc_type >= WM_T_PCH) {
   9027 		/* All PCH* use _hv_ */
   9028 		new_readreg = wm_gmii_hv_readreg;
   9029 		new_writereg = wm_gmii_hv_writereg;
   9030 	} else if (sc->sc_type >= WM_T_ICH8) {
   9031 		/* non-82567 ICH8, 9 and 10 */
   9032 		new_readreg = wm_gmii_i82544_readreg;
   9033 		new_writereg = wm_gmii_i82544_writereg;
   9034 	} else if (sc->sc_type >= WM_T_80003) {
   9035 		/* 80003 */
   9036 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9037 		    && (new_phytype != WMPHY_GG82563)
   9038 		    && (new_phytype != WMPHY_UNKNOWN))
   9039 			doubt_phytype = new_phytype;
   9040 		new_phytype = WMPHY_GG82563;
   9041 		new_readreg = wm_gmii_i80003_readreg;
   9042 		new_writereg = wm_gmii_i80003_writereg;
   9043 	} else if (sc->sc_type >= WM_T_I210) {
   9044 		/* I210 and I211 */
   9045 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9046 		    && (new_phytype != WMPHY_I210)
   9047 		    && (new_phytype != WMPHY_UNKNOWN))
   9048 			doubt_phytype = new_phytype;
   9049 		new_phytype = WMPHY_I210;
   9050 		new_readreg = wm_gmii_gs40g_readreg;
   9051 		new_writereg = wm_gmii_gs40g_writereg;
   9052 	} else if (sc->sc_type >= WM_T_82580) {
   9053 		/* 82580, I350 and I354 */
   9054 		new_readreg = wm_gmii_82580_readreg;
   9055 		new_writereg = wm_gmii_82580_writereg;
   9056 	} else if (sc->sc_type >= WM_T_82544) {
   9057 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   9058 		new_readreg = wm_gmii_i82544_readreg;
   9059 		new_writereg = wm_gmii_i82544_writereg;
   9060 	} else {
   9061 		new_readreg = wm_gmii_i82543_readreg;
   9062 		new_writereg = wm_gmii_i82543_writereg;
   9063 	}
   9064 
   9065 	if (new_phytype == WMPHY_BM) {
   9066 		/* All BM use _bm_ */
   9067 		new_readreg = wm_gmii_bm_readreg;
   9068 		new_writereg = wm_gmii_bm_writereg;
   9069 	}
   9070 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9071 		/* All PCH* use _hv_ */
   9072 		new_readreg = wm_gmii_hv_readreg;
   9073 		new_writereg = wm_gmii_hv_writereg;
   9074 	}
   9075 
   9076 	/* Diag output */
   9077 	if (doubt_phytype != WMPHY_UNKNOWN)
   9078 		aprint_error_dev(dev, "Assumed new PHY type was "
   9079 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9080 		    new_phytype);
   9081 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
		    " was incorrect. New PHY type = %u\n",
   9085 		    sc->sc_phytype, new_phytype);
   9086 
   9087 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9088 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9089 
   9090 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9091 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9092 		    "function was incorrect.\n");
   9093 
   9094 	/* Update now */
   9095 	sc->sc_phytype = new_phytype;
   9096 	mii->mii_readreg = new_readreg;
   9097 	mii->mii_writereg = new_writereg;
   9098 }
   9099 
   9100 /*
   9101  * wm_get_phy_id_82575:
   9102  *
   9103  * Return PHY ID. Return -1 if it failed.
   9104  */
   9105 static int
   9106 wm_get_phy_id_82575(struct wm_softc *sc)
   9107 {
   9108 	uint32_t reg;
   9109 	int phyid = -1;
   9110 
   9111 	/* XXX */
   9112 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9113 		return -1;
   9114 
   9115 	if (wm_sgmii_uses_mdio(sc)) {
   9116 		switch (sc->sc_type) {
   9117 		case WM_T_82575:
   9118 		case WM_T_82576:
   9119 			reg = CSR_READ(sc, WMREG_MDIC);
   9120 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9121 			break;
   9122 		case WM_T_82580:
   9123 		case WM_T_I350:
   9124 		case WM_T_I354:
   9125 		case WM_T_I210:
   9126 		case WM_T_I211:
   9127 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9128 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9129 			break;
   9130 		default:
   9131 			return -1;
   9132 		}
   9133 	}
   9134 
   9135 	return phyid;
   9136 }
   9137 
   9138 
   9139 /*
   9140  * wm_gmii_mediainit:
   9141  *
   9142  *	Initialize media for use on 1000BASE-T devices.
   9143  */
   9144 static void
   9145 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9146 {
   9147 	device_t dev = sc->sc_dev;
   9148 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9149 	struct mii_data *mii = &sc->sc_mii;
   9150 	uint32_t reg;
   9151 
   9152 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9153 		device_xname(sc->sc_dev), __func__));
   9154 
   9155 	/* We have GMII. */
   9156 	sc->sc_flags |= WM_F_HAS_MII;
   9157 
   9158 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9160 	else
   9161 		sc->sc_tipg = TIPG_1000T_DFLT;
   9162 
   9163 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9164 	if ((sc->sc_type == WM_T_82580)
   9165 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9166 	    || (sc->sc_type == WM_T_I211)) {
   9167 		reg = CSR_READ(sc, WMREG_PHPM);
   9168 		reg &= ~PHPM_GO_LINK_D;
   9169 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9170 	}
   9171 
   9172 	/*
   9173 	 * Let the chip set speed/duplex on its own based on
   9174 	 * signals from the PHY.
   9175 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9176 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9177 	 */
   9178 	sc->sc_ctrl |= CTRL_SLU;
   9179 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9180 
   9181 	/* Initialize our media structures and probe the GMII. */
   9182 	mii->mii_ifp = ifp;
   9183 
   9184 	/*
   9185 	 * The first call of wm_mii_setup_phytype. The result might be
   9186 	 * incorrect.
   9187 	 */
   9188 	wm_gmii_setup_phytype(sc, 0, 0);
   9189 
   9190 	mii->mii_statchg = wm_gmii_statchg;
   9191 
   9192 	/* get PHY control from SMBus to PCIe */
   9193 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9194 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9195 		wm_smbustopci(sc);
   9196 
   9197 	wm_gmii_reset(sc);
   9198 
   9199 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9200 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9201 	    wm_gmii_mediastatus);
   9202 
   9203 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9204 	    || (sc->sc_type == WM_T_82580)
   9205 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9206 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9207 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9208 			/* Attach only one port */
   9209 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9210 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9211 		} else {
   9212 			int i, id;
   9213 			uint32_t ctrl_ext;
   9214 
   9215 			id = wm_get_phy_id_82575(sc);
   9216 			if (id != -1) {
   9217 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9218 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9219 			}
   9220 			if ((id == -1)
   9221 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   9223 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9224 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9225 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9226 				CSR_WRITE_FLUSH(sc);
   9227 				delay(300*1000); /* XXX too long */
   9228 
				/* Try PHY addresses 1 through 7 */
   9230 				for (i = 1; i < 8; i++)
   9231 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9232 					    0xffffffff, i, MII_OFFSET_ANY,
   9233 					    MIIF_DOPAUSE);
   9234 
				/* Restore the previous SFP cage power state */
   9236 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9237 			}
   9238 		}
   9239 	} else {
   9240 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9241 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9242 	}
   9243 
   9244 	/*
   9245 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9246 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9247 	 */
   9248 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9249 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9250 		wm_set_mdio_slow_mode_hv(sc);
   9251 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9252 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9253 	}
   9254 
   9255 	/*
   9256 	 * (For ICH8 variants)
   9257 	 * If PHY detection failed, use BM's r/w function and retry.
   9258 	 */
   9259 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9260 		/* if failed, retry with *_bm_* */
   9261 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9262 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9263 		    sc->sc_phytype);
   9264 		sc->sc_phytype = WMPHY_BM;
   9265 		mii->mii_readreg = wm_gmii_bm_readreg;
   9266 		mii->mii_writereg = wm_gmii_bm_writereg;
   9267 
   9268 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9269 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9270 	}
   9271 
   9272 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   9274 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9275 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9276 		sc->sc_phytype = WMPHY_NONE;
   9277 	} else {
   9278 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9279 
   9280 		/*
   9281 		 * PHY Found! Check PHY type again by the second call of
   9282 		 * wm_mii_setup_phytype.
   9283 		 */
   9284 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9285 		    child->mii_mpd_model);
   9286 
   9287 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9288 	}
   9289 }
   9290 
   9291 /*
   9292  * wm_gmii_mediachange:	[ifmedia interface function]
   9293  *
   9294  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9295  */
   9296 static int
   9297 wm_gmii_mediachange(struct ifnet *ifp)
   9298 {
   9299 	struct wm_softc *sc = ifp->if_softc;
   9300 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9301 	int rc;
   9302 
   9303 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9304 		device_xname(sc->sc_dev), __func__));
   9305 	if ((ifp->if_flags & IFF_UP) == 0)
   9306 		return 0;
   9307 
   9308 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9309 	sc->sc_ctrl |= CTRL_SLU;
   9310 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9311 	    || (sc->sc_type > WM_T_82543)) {
   9312 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9313 	} else {
   9314 		sc->sc_ctrl &= ~CTRL_ASDE;
   9315 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9316 		if (ife->ifm_media & IFM_FDX)
   9317 			sc->sc_ctrl |= CTRL_FD;
   9318 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9319 		case IFM_10_T:
   9320 			sc->sc_ctrl |= CTRL_SPEED_10;
   9321 			break;
   9322 		case IFM_100_TX:
   9323 			sc->sc_ctrl |= CTRL_SPEED_100;
   9324 			break;
   9325 		case IFM_1000_T:
   9326 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9327 			break;
   9328 		default:
   9329 			panic("wm_gmii_mediachange: bad media 0x%x",
   9330 			    ife->ifm_media);
   9331 		}
   9332 	}
   9333 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9334 	if (sc->sc_type <= WM_T_82543)
   9335 		wm_gmii_reset(sc);
   9336 
   9337 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9338 		return 0;
   9339 	return rc;
   9340 }
   9341 
   9342 /*
   9343  * wm_gmii_mediastatus:	[ifmedia interface function]
   9344  *
   9345  *	Get the current interface media status on a 1000BASE-T device.
   9346  */
   9347 static void
   9348 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9349 {
   9350 	struct wm_softc *sc = ifp->if_softc;
   9351 
   9352 	ether_mediastatus(ifp, ifmr);
   9353 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9354 	    | sc->sc_flowflags;
   9355 }
   9356 
   9357 #define	MDI_IO		CTRL_SWDPIN(2)
   9358 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9359 #define	MDI_CLK		CTRL_SWDPIN(3)
   9360 
   9361 static void
   9362 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9363 {
   9364 	uint32_t i, v;
   9365 
   9366 	v = CSR_READ(sc, WMREG_CTRL);
   9367 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9368 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9369 
   9370 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9371 		if (data & i)
   9372 			v |= MDI_IO;
   9373 		else
   9374 			v &= ~MDI_IO;
   9375 		CSR_WRITE(sc, WMREG_CTRL, v);
   9376 		CSR_WRITE_FLUSH(sc);
   9377 		delay(10);
   9378 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9379 		CSR_WRITE_FLUSH(sc);
   9380 		delay(10);
   9381 		CSR_WRITE(sc, WMREG_CTRL, v);
   9382 		CSR_WRITE_FLUSH(sc);
   9383 		delay(10);
   9384 	}
   9385 }
   9386 
   9387 static uint32_t
   9388 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9389 {
   9390 	uint32_t v, i, data = 0;
   9391 
   9392 	v = CSR_READ(sc, WMREG_CTRL);
   9393 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9394 	v |= CTRL_SWDPIO(3);
   9395 
   9396 	CSR_WRITE(sc, WMREG_CTRL, v);
   9397 	CSR_WRITE_FLUSH(sc);
   9398 	delay(10);
   9399 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9400 	CSR_WRITE_FLUSH(sc);
   9401 	delay(10);
   9402 	CSR_WRITE(sc, WMREG_CTRL, v);
   9403 	CSR_WRITE_FLUSH(sc);
   9404 	delay(10);
   9405 
   9406 	for (i = 0; i < 16; i++) {
   9407 		data <<= 1;
   9408 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9409 		CSR_WRITE_FLUSH(sc);
   9410 		delay(10);
   9411 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9412 			data |= 1;
   9413 		CSR_WRITE(sc, WMREG_CTRL, v);
   9414 		CSR_WRITE_FLUSH(sc);
   9415 		delay(10);
   9416 	}
   9417 
   9418 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9419 	CSR_WRITE_FLUSH(sc);
   9420 	delay(10);
   9421 	CSR_WRITE(sc, WMREG_CTRL, v);
   9422 	CSR_WRITE_FLUSH(sc);
   9423 	delay(10);
   9424 
   9425 	return data;
   9426 }
   9427 
   9428 #undef MDI_IO
   9429 #undef MDI_DIR
   9430 #undef MDI_CLK
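
/*
 * For reference: the frames the bit-bang helpers above clock out are
 * standard IEEE 802.3 clause 22 management frames.  A read consists of
 * a 32-bit preamble of ones, then 14 bits of ST(01) OP(10, read)
 * PHYAD(5 bits) REGAD(5 bits), after which the PHY drives 16 data bits
 * that wm_i82543_mii_recvbits() samples.  A write sends the whole
 * 32-bit frame, with OP 01 and the data appended after a 10 turnaround.
 */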
   9431 
   9432 /*
   9433  * wm_gmii_i82543_readreg:	[mii interface function]
   9434  *
   9435  *	Read a PHY register on the GMII (i82543 version).
   9436  */
   9437 static int
   9438 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9439 {
   9440 	struct wm_softc *sc = device_private(self);
   9441 	int rv;
   9442 
   9443 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9444 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9445 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9446 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9447 
   9448 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9449 	    device_xname(sc->sc_dev), phy, reg, rv));
   9450 
   9451 	return rv;
   9452 }
   9453 
   9454 /*
   9455  * wm_gmii_i82543_writereg:	[mii interface function]
   9456  *
   9457  *	Write a PHY register on the GMII (i82543 version).
   9458  */
   9459 static void
   9460 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9461 {
   9462 	struct wm_softc *sc = device_private(self);
   9463 
   9464 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9465 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9466 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9467 	    (MII_COMMAND_START << 30), 32);
   9468 }
   9469 
   9470 /*
   9471  * wm_gmii_mdic_readreg:	[mii interface function]
   9472  *
   9473  *	Read a PHY register on the GMII.
   9474  */
   9475 static int
   9476 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9477 {
   9478 	struct wm_softc *sc = device_private(self);
   9479 	uint32_t mdic = 0;
   9480 	int i, rv;
   9481 
   9482 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9483 	    MDIC_REGADD(reg));
   9484 
   9485 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9486 		mdic = CSR_READ(sc, WMREG_MDIC);
   9487 		if (mdic & MDIC_READY)
   9488 			break;
   9489 		delay(50);
   9490 	}
   9491 
   9492 	if ((mdic & MDIC_READY) == 0) {
   9493 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9494 		    device_xname(sc->sc_dev), phy, reg);
   9495 		rv = 0;
   9496 	} else if (mdic & MDIC_E) {
   9497 #if 0 /* This is normal if no PHY is present. */
   9498 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9499 		    device_xname(sc->sc_dev), phy, reg);
   9500 #endif
   9501 		rv = 0;
   9502 	} else {
   9503 		rv = MDIC_DATA(mdic);
   9504 		if (rv == 0xffff)
   9505 			rv = 0;
   9506 	}
   9507 
   9508 	return rv;
   9509 }
   9510 
   9511 /*
   9512  * wm_gmii_mdic_writereg:	[mii interface function]
   9513  *
   9514  *	Write a PHY register on the GMII.
   9515  */
   9516 static void
   9517 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9518 {
   9519 	struct wm_softc *sc = device_private(self);
   9520 	uint32_t mdic = 0;
   9521 	int i;
   9522 
   9523 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9524 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9525 
   9526 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9527 		mdic = CSR_READ(sc, WMREG_MDIC);
   9528 		if (mdic & MDIC_READY)
   9529 			break;
   9530 		delay(50);
   9531 	}
   9532 
   9533 	if ((mdic & MDIC_READY) == 0)
   9534 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9535 		    device_xname(sc->sc_dev), phy, reg);
   9536 	else if (mdic & MDIC_E)
   9537 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9538 		    device_xname(sc->sc_dev), phy, reg);
   9539 }
   9540 
   9541 /*
   9542  * wm_gmii_i82544_readreg:	[mii interface function]
   9543  *
   9544  *	Read a PHY register on the GMII.
   9545  */
   9546 static int
   9547 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9548 {
   9549 	struct wm_softc *sc = device_private(self);
   9550 	int rv;
   9551 
   9552 	if (sc->phy.acquire(sc)) {
   9553 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9554 		    __func__);
   9555 		return 0;
   9556 	}
   9557 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9558 	sc->phy.release(sc);
   9559 
   9560 	return rv;
   9561 }
   9562 
   9563 /*
   9564  * wm_gmii_i82544_writereg:	[mii interface function]
   9565  *
   9566  *	Write a PHY register on the GMII.
   9567  */
   9568 static void
   9569 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9570 {
   9571 	struct wm_softc *sc = device_private(self);
   9572 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
   9577 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9578 	sc->phy.release(sc);
   9579 }
   9580 
   9581 /*
   9582  * wm_gmii_i80003_readreg:	[mii interface function]
   9583  *
 *	Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9587  */
   9588 static int
   9589 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9590 {
   9591 	struct wm_softc *sc = device_private(self);
   9592 	int rv;
   9593 
   9594 	if (phy != 1) /* only one PHY on kumeran bus */
   9595 		return 0;
   9596 
   9597 	if (sc->phy.acquire(sc)) {
   9598 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9599 		    __func__);
   9600 		return 0;
   9601 	}
   9602 
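         	/*
         	 * Registers at or above GG82563_MIN_ALT_REG are reached through
         	 * the alternate page-select register; lower registers use the
         	 * normal page-select register.
         	 */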
   9603 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9604 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9605 		    reg >> GG82563_PAGE_SHIFT);
   9606 	} else {
   9607 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9608 		    reg >> GG82563_PAGE_SHIFT);
   9609 	}
    9610 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   9611 	delay(200);
   9612 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9613 	delay(200);
   9614 	sc->phy.release(sc);
   9615 
   9616 	return rv;
   9617 }
   9618 
   9619 /*
   9620  * wm_gmii_i80003_writereg:	[mii interface function]
   9621  *
   9622  *	Write a PHY register on the kumeran.
   9623  * This could be handled by the PHY layer if we didn't have to lock the
    9624  * resource ...
   9625  */
   9626 static void
   9627 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9628 {
   9629 	struct wm_softc *sc = device_private(self);
   9630 
   9631 	if (phy != 1) /* only one PHY on kumeran bus */
   9632 		return;
   9633 
   9634 	if (sc->phy.acquire(sc)) {
   9635 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9636 		    __func__);
   9637 		return;
   9638 	}
   9639 
   9640 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9641 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9642 		    reg >> GG82563_PAGE_SHIFT);
   9643 	} else {
   9644 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9645 		    reg >> GG82563_PAGE_SHIFT);
   9646 	}
    9647 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   9648 	delay(200);
   9649 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9650 	delay(200);
   9651 
   9652 	sc->phy.release(sc);
   9653 }
   9654 
   9655 /*
   9656  * wm_gmii_bm_readreg:	[mii interface function]
   9657  *
    9658  *	Read a PHY register on the BM PHY.
    9659  * This could be handled by the PHY layer if we didn't have to lock the
    9660  * resource ...
   9661  */
   9662 static int
   9663 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9664 {
   9665 	struct wm_softc *sc = device_private(self);
   9666 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9667 	uint16_t val;
   9668 	int rv;
   9669 
   9670 	if (sc->phy.acquire(sc)) {
   9671 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9672 		    __func__);
   9673 		return 0;
   9674 	}
   9675 
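         	/*
         	 * Except on the 82574 and 82583, registers on pages >= 768,
         	 * register 25 on page 0 and register 31 are at PHY address 1.
         	 */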
   9676 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9677 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9678 		    || (reg == 31)) ? 1 : phy;
   9679 	/* Page 800 works differently than the rest so it has its own func */
   9680 	if (page == BM_WUC_PAGE) {
   9681 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9682 		rv = val;
   9683 		goto release;
   9684 	}
   9685 
   9686 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9687 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9688 		    && (sc->sc_type != WM_T_82583))
   9689 			wm_gmii_mdic_writereg(self, phy,
   9690 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9691 		else
   9692 			wm_gmii_mdic_writereg(self, phy,
   9693 			    BME1000_PHY_PAGE_SELECT, page);
   9694 	}
   9695 
   9696 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9697 
   9698 release:
   9699 	sc->phy.release(sc);
   9700 	return rv;
   9701 }
   9702 
   9703 /*
   9704  * wm_gmii_bm_writereg:	[mii interface function]
   9705  *
    9706  *	Write a PHY register on the BM PHY.
    9707  * This could be handled by the PHY layer if we didn't have to lock the
    9708  * resource ...
   9709  */
   9710 static void
   9711 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9712 {
   9713 	struct wm_softc *sc = device_private(self);
   9714 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9715 
   9716 	if (sc->phy.acquire(sc)) {
   9717 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9718 		    __func__);
   9719 		return;
   9720 	}
   9721 
   9722 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9723 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9724 		    || (reg == 31)) ? 1 : phy;
   9725 	/* Page 800 works differently than the rest so it has its own func */
   9726 	if (page == BM_WUC_PAGE) {
   9727 		uint16_t tmp;
   9728 
   9729 		tmp = val;
   9730 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9731 		goto release;
   9732 	}
   9733 
   9734 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9735 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9736 		    && (sc->sc_type != WM_T_82583))
   9737 			wm_gmii_mdic_writereg(self, phy,
   9738 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9739 		else
   9740 			wm_gmii_mdic_writereg(self, phy,
   9741 			    BME1000_PHY_PAGE_SELECT, page);
   9742 	}
   9743 
   9744 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9745 
   9746 release:
   9747 	sc->phy.release(sc);
   9748 }
   9749 
   9750 static void
   9751 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9752 {
   9753 	struct wm_softc *sc = device_private(self);
   9754 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9755 	uint16_t wuce, reg;
   9756 
   9757 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9758 		device_xname(sc->sc_dev), __func__));
   9759 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9760 	if (sc->sc_type == WM_T_PCH) {
    9761 		/* XXX The e1000 driver does nothing here... why? */
   9762 	}
   9763 
   9764 	/*
   9765 	 * 1) Enable PHY wakeup register first.
   9766 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9767 	 */
   9768 
   9769 	/* Set page 769 */
   9770 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9771 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9772 
   9773 	/* Read WUCE and save it */
   9774 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9775 
   9776 	reg = wuce | BM_WUC_ENABLE_BIT;
   9777 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9778 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9779 
   9780 	/* Select page 800 */
   9781 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9782 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9783 
   9784 	/*
   9785 	 * 2) Access PHY wakeup register.
   9786 	 * See e1000_access_phy_wakeup_reg_bm.
   9787 	 */
   9788 
   9789 	/* Write page 800 */
   9790 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9791 
   9792 	if (rd)
   9793 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9794 	else
   9795 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9796 
   9797 	/*
   9798 	 * 3) Disable PHY wakeup register.
   9799 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9800 	 */
   9801 	/* Set page 769 */
   9802 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9803 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9804 
   9805 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9806 }
   9807 
   9808 /*
   9809  * wm_gmii_hv_readreg:	[mii interface function]
   9810  *
    9811  *	Read a PHY register on the HV (PCH) PHY.
    9812  * This could be handled by the PHY layer if we didn't have to lock the
    9813  * resource ...
   9814  */
   9815 static int
   9816 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9817 {
   9818 	struct wm_softc *sc = device_private(self);
   9819 	int rv;
   9820 
   9821 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9822 		device_xname(sc->sc_dev), __func__));
   9823 	if (sc->phy.acquire(sc)) {
   9824 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9825 		    __func__);
   9826 		return 0;
   9827 	}
   9828 
   9829 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9830 	sc->phy.release(sc);
   9831 	return rv;
   9832 }
   9833 
   9834 static int
   9835 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9836 {
   9837 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9838 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9839 	uint16_t val;
   9840 	int rv;
   9841 
   9842 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9843 
   9844 	/* Page 800 works differently than the rest so it has its own func */
   9845 	if (page == BM_WUC_PAGE) {
   9846 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9847 		return val;
   9848 	}
   9849 
    9850 	/*
    9851 	 * Pages 1 through 767 (below HV_INTC_FC_PAGE_START) work differently
    9852 	 * from the rest and are not handled here.
    9853 	 */
   9854 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    9855 		printf("%s: page %d is not supported\n", __func__, page);
   9856 		return 0;
   9857 	}
   9858 
   9859 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9860 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9861 		    page << BME1000_PAGE_SHIFT);
   9862 	}
   9863 
   9864 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9865 	return rv;
   9866 }
   9867 
   9868 /*
   9869  * wm_gmii_hv_writereg:	[mii interface function]
   9870  *
    9871  *	Write a PHY register on the HV (PCH) PHY.
    9872  * This could be handled by the PHY layer if we didn't have to lock the
    9873  * resource ...
   9874  */
   9875 static void
   9876 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9877 {
   9878 	struct wm_softc *sc = device_private(self);
   9879 
   9880 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9881 		device_xname(sc->sc_dev), __func__));
   9882 
   9883 	if (sc->phy.acquire(sc)) {
   9884 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9885 		    __func__);
   9886 		return;
   9887 	}
   9888 
   9889 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9890 	sc->phy.release(sc);
   9891 }
   9892 
   9893 static void
   9894 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9895 {
   9896 	struct wm_softc *sc = device_private(self);
   9897 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9898 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9899 
   9900 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9901 
   9902 	/* Page 800 works differently than the rest so it has its own func */
   9903 	if (page == BM_WUC_PAGE) {
   9904 		uint16_t tmp;
   9905 
   9906 		tmp = val;
   9907 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9908 		return;
   9909 	}
   9910 
    9911 	/*
    9912 	 * Pages 1 through 767 (below HV_INTC_FC_PAGE_START) work differently
    9913 	 * from the rest and are not handled here.
    9914 	 */
   9915 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    9916 		printf("%s: page %d is not supported\n", __func__, page);
   9917 		return;
   9918 	}
   9919 
   9920 	{
   9921 		/*
   9922 		 * XXX Workaround MDIO accesses being disabled after entering
   9923 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9924 		 * register is set)
   9925 		 */
   9926 		if (sc->sc_phytype == WMPHY_82578) {
   9927 			struct mii_softc *child;
   9928 
   9929 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9930 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9931 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9932 			    && ((val & (1 << 11)) != 0)) {
   9933 				printf("XXX need workaround\n");
   9934 			}
   9935 		}
   9936 
   9937 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9938 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9939 			    page << BME1000_PAGE_SHIFT);
   9940 		}
   9941 	}
   9942 
   9943 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9944 }
   9945 
   9946 /*
   9947  * wm_gmii_82580_readreg:	[mii interface function]
   9948  *
   9949  *	Read a PHY register on the 82580 and I350.
   9950  * This could be handled by the PHY layer if we didn't have to lock the
    9951  * resource ...
   9952  */
   9953 static int
   9954 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9955 {
   9956 	struct wm_softc *sc = device_private(self);
   9957 	int rv;
   9958 
   9959 	if (sc->phy.acquire(sc) != 0) {
   9960 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9961 		    __func__);
   9962 		return 0;
   9963 	}
   9964 
   9965 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9966 
   9967 	sc->phy.release(sc);
   9968 	return rv;
   9969 }
   9970 
   9971 /*
   9972  * wm_gmii_82580_writereg:	[mii interface function]
   9973  *
   9974  *	Write a PHY register on the 82580 and I350.
   9975  * This could be handled by the PHY layer if we didn't have to lock the
    9976  * resource ...
   9977  */
   9978 static void
   9979 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9980 {
   9981 	struct wm_softc *sc = device_private(self);
   9982 
   9983 	if (sc->phy.acquire(sc) != 0) {
   9984 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9985 		    __func__);
   9986 		return;
   9987 	}
   9988 
   9989 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9990 
   9991 	sc->phy.release(sc);
   9992 }
   9993 
   9994 /*
   9995  * wm_gmii_gs40g_readreg:	[mii interface function]
   9996  *
    9997  *	Read a PHY register on the I210 and I211.
    9998  * This could be handled by the PHY layer if we didn't have to lock the
    9999  * resource ...
   10000  */
   10001 static int
   10002 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   10003 {
   10004 	struct wm_softc *sc = device_private(self);
   10005 	int page, offset;
   10006 	int rv;
   10007 
   10008 	/* Acquire semaphore */
   10009 	if (sc->phy.acquire(sc)) {
   10010 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10011 		    __func__);
   10012 		return 0;
   10013 	}
   10014 
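          	/*
          	 * GS40G register numbers encode the page above GS40G_PAGE_SHIFT
          	 * and the register offset in the low bits.
          	 */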
   10015 	/* Page select */
   10016 	page = reg >> GS40G_PAGE_SHIFT;
   10017 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10018 
   10019 	/* Read reg */
   10020 	offset = reg & GS40G_OFFSET_MASK;
   10021 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10022 
   10023 	sc->phy.release(sc);
   10024 	return rv;
   10025 }
   10026 
   10027 /*
   10028  * wm_gmii_gs40g_writereg:	[mii interface function]
   10029  *
   10030  *	Write a PHY register on the I210 and I211.
   10031  * This could be handled by the PHY layer if we didn't have to lock the
    10032  * resource ...
   10033  */
   10034 static void
   10035 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10036 {
   10037 	struct wm_softc *sc = device_private(self);
   10038 	int page, offset;
   10039 
   10040 	/* Acquire semaphore */
   10041 	if (sc->phy.acquire(sc)) {
   10042 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10043 		    __func__);
   10044 		return;
   10045 	}
   10046 
   10047 	/* Page select */
   10048 	page = reg >> GS40G_PAGE_SHIFT;
   10049 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10050 
   10051 	/* Write reg */
   10052 	offset = reg & GS40G_OFFSET_MASK;
   10053 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10054 
   10055 	/* Release semaphore */
   10056 	sc->phy.release(sc);
   10057 }
   10058 
   10059 /*
   10060  * wm_gmii_statchg:	[mii interface function]
   10061  *
   10062  *	Callback from MII layer when media changes.
   10063  */
   10064 static void
   10065 wm_gmii_statchg(struct ifnet *ifp)
   10066 {
   10067 	struct wm_softc *sc = ifp->if_softc;
   10068 	struct mii_data *mii = &sc->sc_mii;
   10069 
   10070 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10071 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10072 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10073 
   10074 	/*
   10075 	 * Get flow control negotiation result.
   10076 	 */
   10077 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10078 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10079 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10080 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10081 	}
   10082 
   10083 	if (sc->sc_flowflags & IFM_FLOW) {
   10084 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10085 			sc->sc_ctrl |= CTRL_TFCE;
   10086 			sc->sc_fcrtl |= FCRTL_XONE;
   10087 		}
   10088 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10089 			sc->sc_ctrl |= CTRL_RFCE;
   10090 	}
   10091 
   10092 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10093 		DPRINTF(WM_DEBUG_LINK,
   10094 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10095 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10096 	} else {
   10097 		DPRINTF(WM_DEBUG_LINK,
   10098 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10099 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10100 	}
   10101 
   10102 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10103 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10104 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10105 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10106 	if (sc->sc_type == WM_T_80003) {
   10107 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10108 		case IFM_1000_T:
   10109 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10110 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10111 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10112 			break;
   10113 		default:
   10114 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10115 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10116 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10117 			break;
   10118 		}
   10119 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10120 	}
   10121 }
   10122 
   10123 /* kumeran related (80003, ICH* and PCH*) */
   10124 
   10125 /*
   10126  * wm_kmrn_readreg:
   10127  *
   10128  *	Read a kumeran register
   10129  */
   10130 static int
   10131 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10132 {
   10133 	int rv;
   10134 
   10135 	if (sc->sc_type == WM_T_80003)
   10136 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10137 	else
   10138 		rv = sc->phy.acquire(sc);
   10139 	if (rv != 0) {
   10140 		aprint_error_dev(sc->sc_dev,
   10141 		    "%s: failed to get semaphore\n", __func__);
   10142 		return 0;
   10143 	}
   10144 
   10145 	rv = wm_kmrn_readreg_locked(sc, reg);
   10146 
   10147 	if (sc->sc_type == WM_T_80003)
   10148 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10149 	else
   10150 		sc->phy.release(sc);
   10151 
   10152 	return rv;
   10153 }
   10154 
   10155 static int
   10156 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10157 {
   10158 	int rv;
   10159 
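          	/*
          	 * Setting KUMCTRLSTA_REN starts a read cycle; the result is
          	 * available in the same register shortly afterwards.
          	 */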
   10160 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10161 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10162 	    KUMCTRLSTA_REN);
   10163 	CSR_WRITE_FLUSH(sc);
   10164 	delay(2);
   10165 
   10166 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10167 
   10168 	return rv;
   10169 }
   10170 
   10171 /*
   10172  * wm_kmrn_writereg:
   10173  *
   10174  *	Write a kumeran register
   10175  */
   10176 static void
   10177 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10178 {
   10179 	int rv;
   10180 
   10181 	if (sc->sc_type == WM_T_80003)
   10182 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10183 	else
   10184 		rv = sc->phy.acquire(sc);
   10185 	if (rv != 0) {
   10186 		aprint_error_dev(sc->sc_dev,
   10187 		    "%s: failed to get semaphore\n", __func__);
   10188 		return;
   10189 	}
   10190 
   10191 	wm_kmrn_writereg_locked(sc, reg, val);
   10192 
   10193 	if (sc->sc_type == WM_T_80003)
   10194 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10195 	else
   10196 		sc->phy.release(sc);
   10197 }
   10198 
   10199 static void
   10200 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10201 {
   10202 
   10203 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10204 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10205 	    (val & KUMCTRLSTA_MASK));
   10206 }
   10207 
   10208 /* SGMII related */
   10209 
   10210 /*
   10211  * wm_sgmii_uses_mdio
   10212  *
   10213  * Check whether the transaction is to the internal PHY or the external
   10214  * MDIO interface. Return true if it's MDIO.
   10215  */
   10216 static bool
   10217 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10218 {
   10219 	uint32_t reg;
   10220 	bool ismdio = false;
   10221 
   10222 	switch (sc->sc_type) {
   10223 	case WM_T_82575:
   10224 	case WM_T_82576:
   10225 		reg = CSR_READ(sc, WMREG_MDIC);
   10226 		ismdio = ((reg & MDIC_DEST) != 0);
   10227 		break;
   10228 	case WM_T_82580:
   10229 	case WM_T_I350:
   10230 	case WM_T_I354:
   10231 	case WM_T_I210:
   10232 	case WM_T_I211:
   10233 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10234 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10235 		break;
   10236 	default:
   10237 		break;
   10238 	}
   10239 
   10240 	return ismdio;
   10241 }
   10242 
   10243 /*
   10244  * wm_sgmii_readreg:	[mii interface function]
   10245  *
    10246  *	Read a PHY register on the SGMII.
    10247  * This could be handled by the PHY layer if we didn't have to lock the
    10248  * resource ...
   10249  */
   10250 static int
   10251 wm_sgmii_readreg(device_t self, int phy, int reg)
   10252 {
   10253 	struct wm_softc *sc = device_private(self);
   10254 	uint32_t i2ccmd;
   10255 	int i, rv;
   10256 
   10257 	if (sc->phy.acquire(sc)) {
   10258 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10259 		    __func__);
   10260 		return 0;
   10261 	}
   10262 
   10263 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10264 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10265 	    | I2CCMD_OPCODE_READ;
   10266 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10267 
   10268 	/* Poll the ready bit */
   10269 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10270 		delay(50);
   10271 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10272 		if (i2ccmd & I2CCMD_READY)
   10273 			break;
   10274 	}
   10275 	if ((i2ccmd & I2CCMD_READY) == 0)
   10276 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10277 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10278 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10279 
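          	/* Swap the data bytes returned by the I2C interface */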
   10280 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10281 
   10282 	sc->phy.release(sc);
   10283 	return rv;
   10284 }
   10285 
   10286 /*
   10287  * wm_sgmii_writereg:	[mii interface function]
   10288  *
   10289  *	Write a PHY register on the SGMII.
   10290  * This could be handled by the PHY layer if we didn't have to lock the
    10291  * resource ...
   10292  */
   10293 static void
   10294 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10295 {
   10296 	struct wm_softc *sc = device_private(self);
   10297 	uint32_t i2ccmd;
   10298 	int i;
   10299 	int val_swapped;
   10300 
   10301 	if (sc->phy.acquire(sc) != 0) {
   10302 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10303 		    __func__);
   10304 		return;
   10305 	}
   10306 	/* Swap the data bytes for the I2C interface */
   10307 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10308 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10309 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10310 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10311 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10312 
   10313 	/* Poll the ready bit */
   10314 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10315 		delay(50);
   10316 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10317 		if (i2ccmd & I2CCMD_READY)
   10318 			break;
   10319 	}
   10320 	if ((i2ccmd & I2CCMD_READY) == 0)
   10321 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10322 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10323 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10324 
   10325 	sc->phy.release(sc);
   10326 }
   10327 
   10328 /* TBI related */
   10329 
   10330 /*
   10331  * wm_tbi_mediainit:
   10332  *
   10333  *	Initialize media for use on 1000BASE-X devices.
   10334  */
   10335 static void
   10336 wm_tbi_mediainit(struct wm_softc *sc)
   10337 {
   10338 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10339 	const char *sep = "";
   10340 
   10341 	if (sc->sc_type < WM_T_82543)
   10342 		sc->sc_tipg = TIPG_WM_DFLT;
   10343 	else
   10344 		sc->sc_tipg = TIPG_LG_DFLT;
   10345 
   10346 	sc->sc_tbi_serdes_anegticks = 5;
   10347 
   10348 	/* Initialize our media structures */
   10349 	sc->sc_mii.mii_ifp = ifp;
   10350 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10351 
   10352 	if ((sc->sc_type >= WM_T_82575)
   10353 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10354 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10355 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10356 	else
   10357 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10358 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10359 
   10360 	/*
   10361 	 * SWD Pins:
   10362 	 *
   10363 	 *	0 = Link LED (output)
   10364 	 *	1 = Loss Of Signal (input)
   10365 	 */
   10366 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10367 
   10368 	/* XXX Perhaps this is only for TBI */
   10369 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10370 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10371 
   10372 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10373 		sc->sc_ctrl &= ~CTRL_LRST;
   10374 
   10375 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10376 
   10377 #define	ADD(ss, mm, dd)							\
   10378 do {									\
   10379 	aprint_normal("%s%s", sep, ss);					\
   10380 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10381 	sep = ", ";							\
   10382 } while (/*CONSTCOND*/0)
   10383 
   10384 	aprint_normal_dev(sc->sc_dev, "");
   10385 
   10386 	if (sc->sc_type == WM_T_I354) {
   10387 		uint32_t status;
   10388 
   10389 		status = CSR_READ(sc, WMREG_STATUS);
   10390 		if (((status & STATUS_2P5_SKU) != 0)
   10391 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    10392 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX, ANAR_X_FD);
    10393 		} else
    10394 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10395 	} else if (sc->sc_type == WM_T_82545) {
   10396 		/* Only 82545 is LX (XXX except SFP) */
   10397 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10398 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10399 	} else {
   10400 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10401 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10402 	}
   10403 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10404 	aprint_normal("\n");
   10405 
   10406 #undef ADD
   10407 
   10408 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10409 }
   10410 
   10411 /*
   10412  * wm_tbi_mediachange:	[ifmedia interface function]
   10413  *
   10414  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10415  */
   10416 static int
   10417 wm_tbi_mediachange(struct ifnet *ifp)
   10418 {
   10419 	struct wm_softc *sc = ifp->if_softc;
   10420 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10421 	uint32_t status;
   10422 	int i;
   10423 
   10424 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10425 		/* XXX need some work for >= 82571 and < 82575 */
   10426 		if (sc->sc_type < WM_T_82575)
   10427 			return 0;
   10428 	}
   10429 
   10430 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10431 	    || (sc->sc_type >= WM_T_82575))
   10432 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10433 
   10434 	sc->sc_ctrl &= ~CTRL_LRST;
   10435 	sc->sc_txcw = TXCW_ANE;
   10436 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10437 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10438 	else if (ife->ifm_media & IFM_FDX)
   10439 		sc->sc_txcw |= TXCW_FD;
   10440 	else
   10441 		sc->sc_txcw |= TXCW_HD;
   10442 
   10443 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10444 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10445 
   10446 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10447 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10448 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10449 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10450 	CSR_WRITE_FLUSH(sc);
   10451 	delay(1000);
   10452 
   10453 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10454 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10455 
    10456 	/*
    10457 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    10458 	 * if the optics detect a signal, and 0 if they don't.
    10459 	 */
   10460 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10461 		/* Have signal; wait for the link to come up. */
   10462 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10463 			delay(10000);
   10464 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10465 				break;
   10466 		}
   10467 
   10468 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10469 			    device_xname(sc->sc_dev),i));
   10470 
   10471 		status = CSR_READ(sc, WMREG_STATUS);
   10472 		DPRINTF(WM_DEBUG_LINK,
   10473 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10474 			device_xname(sc->sc_dev),status, STATUS_LU));
   10475 		if (status & STATUS_LU) {
   10476 			/* Link is up. */
   10477 			DPRINTF(WM_DEBUG_LINK,
   10478 			    ("%s: LINK: set media -> link up %s\n",
   10479 			    device_xname(sc->sc_dev),
   10480 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10481 
   10482 			/*
   10483 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10484 			 * so we should update sc->sc_ctrl
   10485 			 */
   10486 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10487 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10488 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10489 			if (status & STATUS_FD)
   10490 				sc->sc_tctl |=
   10491 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10492 			else
   10493 				sc->sc_tctl |=
   10494 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10495 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10496 				sc->sc_fcrtl |= FCRTL_XONE;
   10497 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10498 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10499 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10500 				      sc->sc_fcrtl);
   10501 			sc->sc_tbi_linkup = 1;
   10502 		} else {
   10503 			if (i == WM_LINKUP_TIMEOUT)
   10504 				wm_check_for_link(sc);
   10505 			/* Link is down. */
   10506 			DPRINTF(WM_DEBUG_LINK,
   10507 			    ("%s: LINK: set media -> link down\n",
   10508 			    device_xname(sc->sc_dev)));
   10509 			sc->sc_tbi_linkup = 0;
   10510 		}
   10511 	} else {
   10512 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10513 		    device_xname(sc->sc_dev)));
   10514 		sc->sc_tbi_linkup = 0;
   10515 	}
   10516 
   10517 	wm_tbi_serdes_set_linkled(sc);
   10518 
   10519 	return 0;
   10520 }
   10521 
   10522 /*
   10523  * wm_tbi_mediastatus:	[ifmedia interface function]
   10524  *
   10525  *	Get the current interface media status on a 1000BASE-X device.
   10526  */
   10527 static void
   10528 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10529 {
   10530 	struct wm_softc *sc = ifp->if_softc;
   10531 	uint32_t ctrl, status;
   10532 
   10533 	ifmr->ifm_status = IFM_AVALID;
   10534 	ifmr->ifm_active = IFM_ETHER;
   10535 
   10536 	status = CSR_READ(sc, WMREG_STATUS);
   10537 	if ((status & STATUS_LU) == 0) {
   10538 		ifmr->ifm_active |= IFM_NONE;
   10539 		return;
   10540 	}
   10541 
   10542 	ifmr->ifm_status |= IFM_ACTIVE;
   10543 	/* Only 82545 is LX */
   10544 	if (sc->sc_type == WM_T_82545)
   10545 		ifmr->ifm_active |= IFM_1000_LX;
   10546 	else
   10547 		ifmr->ifm_active |= IFM_1000_SX;
   10548 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10549 		ifmr->ifm_active |= IFM_FDX;
   10550 	else
   10551 		ifmr->ifm_active |= IFM_HDX;
   10552 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10553 	if (ctrl & CTRL_RFCE)
   10554 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10555 	if (ctrl & CTRL_TFCE)
   10556 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10557 }
   10558 
   10559 /* XXX TBI only */
   10560 static int
   10561 wm_check_for_link(struct wm_softc *sc)
   10562 {
   10563 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10564 	uint32_t rxcw;
   10565 	uint32_t ctrl;
   10566 	uint32_t status;
   10567 	uint32_t sig;
   10568 
   10569 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10570 		/* XXX need some work for >= 82571 */
   10571 		if (sc->sc_type >= WM_T_82571) {
   10572 			sc->sc_tbi_linkup = 1;
   10573 			return 0;
   10574 		}
   10575 	}
   10576 
   10577 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10578 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10579 	status = CSR_READ(sc, WMREG_STATUS);
   10580 
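          	/*
          	 * On chips newer than the 82544, CTRL_SWDPIN(1) reads as 1 when
          	 * the optics detect a signal; on older chips it reads as 0.
          	 */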
   10581 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10582 
   10583 	DPRINTF(WM_DEBUG_LINK,
   10584 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10585 		device_xname(sc->sc_dev), __func__,
   10586 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10587 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10588 
   10589 	/*
   10590 	 * SWDPIN   LU RXCW
   10591 	 *      0    0    0
   10592 	 *      0    0    1	(should not happen)
   10593 	 *      0    1    0	(should not happen)
   10594 	 *      0    1    1	(should not happen)
   10595 	 *      1    0    0	Disable autonego and force linkup
   10596 	 *      1    0    1	got /C/ but not linkup yet
   10597 	 *      1    1    0	(linkup)
   10598 	 *      1    1    1	If IFM_AUTO, back to autonego
   10599 	 *
   10600 	 */
   10601 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10602 	    && ((status & STATUS_LU) == 0)
   10603 	    && ((rxcw & RXCW_C) == 0)) {
   10604 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10605 			__func__));
   10606 		sc->sc_tbi_linkup = 0;
   10607 		/* Disable auto-negotiation in the TXCW register */
   10608 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10609 
   10610 		/*
   10611 		 * Force link-up and also force full-duplex.
    10612 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10613 		 * automatically, so we should update sc->sc_ctrl.
   10614 		 * so we should update sc->sc_ctrl
   10615 		 */
   10616 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10617 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10618 	} else if (((status & STATUS_LU) != 0)
   10619 	    && ((rxcw & RXCW_C) != 0)
   10620 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10621 		sc->sc_tbi_linkup = 1;
   10622 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10623 			__func__));
   10624 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10625 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10626 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10627 	    && ((rxcw & RXCW_C) != 0)) {
   10628 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10629 	} else {
   10630 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10631 			status));
   10632 	}
   10633 
   10634 	return 0;
   10635 }
   10636 
   10637 /*
   10638  * wm_tbi_tick:
   10639  *
   10640  *	Check the link on TBI devices.
   10641  *	This function acts as mii_tick().
   10642  */
   10643 static void
   10644 wm_tbi_tick(struct wm_softc *sc)
   10645 {
   10646 	struct mii_data *mii = &sc->sc_mii;
   10647 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10648 	uint32_t status;
   10649 
   10650 	KASSERT(WM_CORE_LOCKED(sc));
   10651 
   10652 	status = CSR_READ(sc, WMREG_STATUS);
   10653 
   10654 	/* XXX is this needed? */
   10655 	(void)CSR_READ(sc, WMREG_RXCW);
   10656 	(void)CSR_READ(sc, WMREG_CTRL);
   10657 
   10658 	/* set link status */
   10659 	if ((status & STATUS_LU) == 0) {
   10660 		DPRINTF(WM_DEBUG_LINK,
   10661 		    ("%s: LINK: checklink -> down\n",
   10662 			device_xname(sc->sc_dev)));
   10663 		sc->sc_tbi_linkup = 0;
   10664 	} else if (sc->sc_tbi_linkup == 0) {
   10665 		DPRINTF(WM_DEBUG_LINK,
   10666 		    ("%s: LINK: checklink -> up %s\n",
   10667 			device_xname(sc->sc_dev),
   10668 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10669 		sc->sc_tbi_linkup = 1;
   10670 		sc->sc_tbi_serdes_ticks = 0;
   10671 	}
   10672 
   10673 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10674 		goto setled;
   10675 
   10676 	if ((status & STATUS_LU) == 0) {
   10677 		sc->sc_tbi_linkup = 0;
   10678 		/* If the timer expired, retry autonegotiation */
   10679 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10680 		    && (++sc->sc_tbi_serdes_ticks
   10681 			>= sc->sc_tbi_serdes_anegticks)) {
   10682 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10683 			sc->sc_tbi_serdes_ticks = 0;
   10684 			/*
   10685 			 * Reset the link, and let autonegotiation do
   10686 			 * its thing
   10687 			 */
   10688 			sc->sc_ctrl |= CTRL_LRST;
   10689 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10690 			CSR_WRITE_FLUSH(sc);
   10691 			delay(1000);
   10692 			sc->sc_ctrl &= ~CTRL_LRST;
   10693 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10694 			CSR_WRITE_FLUSH(sc);
   10695 			delay(1000);
   10696 			CSR_WRITE(sc, WMREG_TXCW,
   10697 			    sc->sc_txcw & ~TXCW_ANE);
   10698 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10699 		}
   10700 	}
   10701 
   10702 setled:
   10703 	wm_tbi_serdes_set_linkled(sc);
   10704 }
   10705 
   10706 /* SERDES related */
   10707 static void
   10708 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10709 {
   10710 	uint32_t reg;
   10711 
   10712 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10713 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10714 		return;
   10715 
   10716 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10717 	reg |= PCS_CFG_PCS_EN;
   10718 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10719 
   10720 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10721 	reg &= ~CTRL_EXT_SWDPIN(3);
   10722 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10723 	CSR_WRITE_FLUSH(sc);
   10724 }
   10725 
   10726 static int
   10727 wm_serdes_mediachange(struct ifnet *ifp)
   10728 {
   10729 	struct wm_softc *sc = ifp->if_softc;
   10730 	bool pcs_autoneg = true; /* XXX */
   10731 	uint32_t ctrl_ext, pcs_lctl, reg;
   10732 
   10733 	/* XXX Currently, this function is not called on 8257[12] */
   10734 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10735 	    || (sc->sc_type >= WM_T_82575))
   10736 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10737 
   10738 	wm_serdes_power_up_link_82575(sc);
   10739 
   10740 	sc->sc_ctrl |= CTRL_SLU;
   10741 
   10742 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10743 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10744 
   10745 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10746 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10747 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10748 	case CTRL_EXT_LINK_MODE_SGMII:
   10749 		pcs_autoneg = true;
   10750 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10751 		break;
   10752 	case CTRL_EXT_LINK_MODE_1000KX:
   10753 		pcs_autoneg = false;
   10754 		/* FALLTHROUGH */
   10755 	default:
   10756 		if ((sc->sc_type == WM_T_82575)
   10757 		    || (sc->sc_type == WM_T_82576)) {
   10758 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10759 				pcs_autoneg = false;
   10760 		}
   10761 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10762 		    | CTRL_FRCFDX;
   10763 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10764 	}
   10765 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10766 
   10767 	if (pcs_autoneg) {
   10768 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10769 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10770 
   10771 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10772 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10773 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10774 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10775 	} else
   10776 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10777 
   10778 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10779 
   10781 	return 0;
   10782 }
   10783 
   10784 static void
   10785 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10786 {
   10787 	struct wm_softc *sc = ifp->if_softc;
   10788 	struct mii_data *mii = &sc->sc_mii;
   10789 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10790 	uint32_t pcs_adv, pcs_lpab, reg;
   10791 
   10792 	ifmr->ifm_status = IFM_AVALID;
   10793 	ifmr->ifm_active = IFM_ETHER;
   10794 
   10795 	/* Check PCS */
   10796 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10797 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10798 		ifmr->ifm_active |= IFM_NONE;
   10799 		sc->sc_tbi_linkup = 0;
   10800 		goto setled;
   10801 	}
   10802 
   10803 	sc->sc_tbi_linkup = 1;
   10804 	ifmr->ifm_status |= IFM_ACTIVE;
   10805 	if (sc->sc_type == WM_T_I354) {
   10806 		uint32_t status;
   10807 
   10808 		status = CSR_READ(sc, WMREG_STATUS);
   10809 		if (((status & STATUS_2P5_SKU) != 0)
   10810 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10811 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10812 		} else
   10813 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10814 	} else {
   10815 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10816 		case PCS_LSTS_SPEED_10:
   10817 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10818 			break;
   10819 		case PCS_LSTS_SPEED_100:
   10820 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10821 			break;
   10822 		case PCS_LSTS_SPEED_1000:
   10823 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10824 			break;
   10825 		default:
   10826 			device_printf(sc->sc_dev, "Unknown speed\n");
   10827 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10828 			break;
   10829 		}
   10830 	}
   10831 	if ((reg & PCS_LSTS_FDX) != 0)
   10832 		ifmr->ifm_active |= IFM_FDX;
   10833 	else
   10834 		ifmr->ifm_active |= IFM_HDX;
   10835 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10836 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10837 		/* Check flow */
   10838 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10839 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10840 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10841 			goto setled;
   10842 		}
   10843 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10844 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10845 		DPRINTF(WM_DEBUG_LINK,
   10846 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10847 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10848 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10849 			mii->mii_media_active |= IFM_FLOW
   10850 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10851 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10852 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10853 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10854 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10855 			mii->mii_media_active |= IFM_FLOW
   10856 			    | IFM_ETH_TXPAUSE;
   10857 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10858 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10859 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10860 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10861 			mii->mii_media_active |= IFM_FLOW
   10862 			    | IFM_ETH_RXPAUSE;
   10863 		}
   10864 	}
   10865 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10866 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10867 setled:
   10868 	wm_tbi_serdes_set_linkled(sc);
   10869 }
   10870 
   10871 /*
   10872  * wm_serdes_tick:
   10873  *
   10874  *	Check the link on serdes devices.
   10875  */
   10876 static void
   10877 wm_serdes_tick(struct wm_softc *sc)
   10878 {
   10879 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10880 	struct mii_data *mii = &sc->sc_mii;
   10881 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10882 	uint32_t reg;
   10883 
   10884 	KASSERT(WM_CORE_LOCKED(sc));
   10885 
   10886 	mii->mii_media_status = IFM_AVALID;
   10887 	mii->mii_media_active = IFM_ETHER;
   10888 
   10889 	/* Check PCS */
   10890 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10891 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10892 		mii->mii_media_status |= IFM_ACTIVE;
   10893 		sc->sc_tbi_linkup = 1;
   10894 		sc->sc_tbi_serdes_ticks = 0;
   10895 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10896 		if ((reg & PCS_LSTS_FDX) != 0)
   10897 			mii->mii_media_active |= IFM_FDX;
   10898 		else
   10899 			mii->mii_media_active |= IFM_HDX;
   10900 	} else {
   10901 		mii->mii_media_status |= IFM_NONE;
   10902 		sc->sc_tbi_linkup = 0;
   10903 		/* If the timer expired, retry autonegotiation */
   10904 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10905 		    && (++sc->sc_tbi_serdes_ticks
   10906 			>= sc->sc_tbi_serdes_anegticks)) {
   10907 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10908 			sc->sc_tbi_serdes_ticks = 0;
   10909 			/* XXX */
   10910 			wm_serdes_mediachange(ifp);
   10911 		}
   10912 	}
   10913 
   10914 	wm_tbi_serdes_set_linkled(sc);
   10915 }
   10916 
   10917 /* SFP related */
   10918 
   10919 static int
   10920 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10921 {
   10922 	uint32_t i2ccmd;
   10923 	int i;
   10924 
   10925 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10926 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10927 
   10928 	/* Poll the ready bit */
   10929 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10930 		delay(50);
   10931 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10932 		if (i2ccmd & I2CCMD_READY)
   10933 			break;
   10934 	}
   10935 	if ((i2ccmd & I2CCMD_READY) == 0)
   10936 		return -1;
   10937 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10938 		return -1;
   10939 
   10940 	*data = i2ccmd & 0x00ff;
   10941 
   10942 	return 0;
   10943 }
   10944 
   10945 static uint32_t
   10946 wm_sfp_get_media_type(struct wm_softc *sc)
   10947 {
   10948 	uint32_t ctrl_ext;
   10949 	uint8_t val = 0;
   10950 	int timeout = 3;
   10951 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10952 	int rv = -1;
   10953 
   10954 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10955 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10956 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10957 	CSR_WRITE_FLUSH(sc);
   10958 
   10959 	/* Read SFP module data */
   10960 	while (timeout) {
   10961 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10962 		if (rv == 0)
   10963 			break;
   10964 		delay(100*1000); /* XXX too big */
   10965 		timeout--;
   10966 	}
   10967 	if (rv != 0)
   10968 		goto out;
   10969 	switch (val) {
   10970 	case SFF_SFP_ID_SFF:
   10971 		aprint_normal_dev(sc->sc_dev,
   10972 		    "Module/Connector soldered to board\n");
   10973 		break;
   10974 	case SFF_SFP_ID_SFP:
   10975 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10976 		break;
   10977 	case SFF_SFP_ID_UNKNOWN:
   10978 		goto out;
   10979 	default:
   10980 		break;
   10981 	}
   10982 
   10983 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10984 	if (rv != 0) {
   10985 		goto out;
   10986 	}
   10987 
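          	/*
          	 * Map the SFF Ethernet compliance flags to a media type: SX and
          	 * LX modules use SERDES, 1000BASE-T modules are copper behind
          	 * SGMII, and 100BASE-FX modules use SERDES with the SGMII flag
          	 * set.
          	 */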
   10988 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10989 		mediatype = WM_MEDIATYPE_SERDES;
    10990 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
    10991 		sc->sc_flags |= WM_F_SGMII;
    10992 		mediatype = WM_MEDIATYPE_COPPER;
    10993 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   10994 		sc->sc_flags |= WM_F_SGMII;
   10995 		mediatype = WM_MEDIATYPE_SERDES;
   10996 	}
   10997 
   10998 out:
   10999 	/* Restore I2C interface setting */
   11000 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11001 
   11002 	return mediatype;
   11003 }
   11004 
   11005 /*
   11006  * NVM related.
   11007  * Microwire, SPI (w/wo EERD) and Flash.
   11008  */
   11009 
   11010 /* Both spi and uwire */
   11011 
   11012 /*
   11013  * wm_eeprom_sendbits:
   11014  *
   11015  *	Send a series of bits to the EEPROM.
   11016  */
   11017 static void
   11018 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11019 {
   11020 	uint32_t reg;
   11021 	int x;
   11022 
   11023 	reg = CSR_READ(sc, WMREG_EECD);
   11024 
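          	/* Present each bit on DI and pulse SK to clock it into the part */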
   11025 	for (x = nbits; x > 0; x--) {
   11026 		if (bits & (1U << (x - 1)))
   11027 			reg |= EECD_DI;
   11028 		else
   11029 			reg &= ~EECD_DI;
   11030 		CSR_WRITE(sc, WMREG_EECD, reg);
   11031 		CSR_WRITE_FLUSH(sc);
   11032 		delay(2);
   11033 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11034 		CSR_WRITE_FLUSH(sc);
   11035 		delay(2);
   11036 		CSR_WRITE(sc, WMREG_EECD, reg);
   11037 		CSR_WRITE_FLUSH(sc);
   11038 		delay(2);
   11039 	}
   11040 }
   11041 
   11042 /*
   11043  * wm_eeprom_recvbits:
   11044  *
   11045  *	Receive a series of bits from the EEPROM.
   11046  */
   11047 static void
   11048 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11049 {
   11050 	uint32_t reg, val;
   11051 	int x;
   11052 
   11053 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11054 
   11055 	val = 0;
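          	/* Pulse SK and sample DO while the clock is high */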
   11056 	for (x = nbits; x > 0; x--) {
   11057 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11058 		CSR_WRITE_FLUSH(sc);
   11059 		delay(2);
   11060 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11061 			val |= (1U << (x - 1));
   11062 		CSR_WRITE(sc, WMREG_EECD, reg);
   11063 		CSR_WRITE_FLUSH(sc);
   11064 		delay(2);
   11065 	}
   11066 	*valp = val;
   11067 }
   11068 
   11069 /* Microwire */
   11070 
   11071 /*
   11072  * wm_nvm_read_uwire:
   11073  *
   11074  *	Read a word from the EEPROM using the MicroWire protocol.
   11075  */
   11076 static int
   11077 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11078 {
   11079 	uint32_t reg, val;
   11080 	int i;
   11081 
   11082 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11083 		device_xname(sc->sc_dev), __func__));
   11084 
   11085 	for (i = 0; i < wordcnt; i++) {
   11086 		/* Clear SK and DI. */
   11087 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11088 		CSR_WRITE(sc, WMREG_EECD, reg);
   11089 
   11090 		/*
   11091 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11092 		 * and Xen.
   11093 		 *
   11094 		 * We use this workaround only for 82540 because qemu's
   11095 		 * e1000 act as 82540.
   11096 		 */
   11097 		if (sc->sc_type == WM_T_82540) {
   11098 			reg |= EECD_SK;
   11099 			CSR_WRITE(sc, WMREG_EECD, reg);
   11100 			reg &= ~EECD_SK;
   11101 			CSR_WRITE(sc, WMREG_EECD, reg);
   11102 			CSR_WRITE_FLUSH(sc);
   11103 			delay(2);
   11104 		}
   11105 		/* XXX: end of workaround */
   11106 
   11107 		/* Set CHIP SELECT. */
   11108 		reg |= EECD_CS;
   11109 		CSR_WRITE(sc, WMREG_EECD, reg);
   11110 		CSR_WRITE_FLUSH(sc);
   11111 		delay(2);
   11112 
   11113 		/* Shift in the READ command. */
   11114 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11115 
   11116 		/* Shift in address. */
   11117 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11118 
   11119 		/* Shift out the data. */
   11120 		wm_eeprom_recvbits(sc, &val, 16);
   11121 		data[i] = val & 0xffff;
   11122 
   11123 		/* Clear CHIP SELECT. */
   11124 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11125 		CSR_WRITE(sc, WMREG_EECD, reg);
   11126 		CSR_WRITE_FLUSH(sc);
   11127 		delay(2);
   11128 	}
   11129 
   11130 	return 0;
   11131 }
   11132 
   11133 /* SPI */
   11134 
   11135 /*
   11136  * Set SPI and FLASH related information from the EECD register.
   11137  * For 82541 and 82547, the word size is taken from EEPROM.
   11138  */
   11139 static int
   11140 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11141 {
   11142 	int size;
   11143 	uint32_t reg;
   11144 	uint16_t data;
   11145 
   11146 	reg = CSR_READ(sc, WMREG_EECD);
   11147 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11148 
   11149 	/* Read the size of NVM from EECD by default */
   11150 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11151 	switch (sc->sc_type) {
   11152 	case WM_T_82541:
   11153 	case WM_T_82541_2:
   11154 	case WM_T_82547:
   11155 	case WM_T_82547_2:
   11156 		/* Set dummy value to access EEPROM */
   11157 		sc->sc_nvm_wordsize = 64;
   11158 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11159 		reg = data;
   11160 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11161 		if (size == 0)
   11162 			size = 6; /* 64 word size */
   11163 		else
   11164 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11165 		break;
   11166 	case WM_T_80003:
   11167 	case WM_T_82571:
   11168 	case WM_T_82572:
   11169 	case WM_T_82573: /* SPI case */
   11170 	case WM_T_82574: /* SPI case */
   11171 	case WM_T_82583: /* SPI case */
   11172 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11173 		if (size > 14)
   11174 			size = 14;
   11175 		break;
   11176 	case WM_T_82575:
   11177 	case WM_T_82576:
   11178 	case WM_T_82580:
   11179 	case WM_T_I350:
   11180 	case WM_T_I354:
   11181 	case WM_T_I210:
   11182 	case WM_T_I211:
   11183 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11184 		if (size > 15)
   11185 			size = 15;
   11186 		break;
   11187 	default:
   11188 		aprint_error_dev(sc->sc_dev,
   11189 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11190 		return -1;
   11191 		break;
   11192 	}
   11193 
   11194 	sc->sc_nvm_wordsize = 1 << size;
   11195 
   11196 	return 0;
   11197 }
   11198 
   11199 /*
   11200  * wm_nvm_ready_spi:
   11201  *
   11202  *	Wait for a SPI EEPROM to be ready for commands.
   11203  */
   11204 static int
   11205 wm_nvm_ready_spi(struct wm_softc *sc)
   11206 {
   11207 	uint32_t val;
   11208 	int usec;
   11209 
   11210 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11211 		device_xname(sc->sc_dev), __func__));
   11212 
   11213 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11214 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11215 		wm_eeprom_recvbits(sc, &val, 8);
   11216 		if ((val & SPI_SR_RDY) == 0)
   11217 			break;
   11218 	}
   11219 	if (usec >= SPI_MAX_RETRIES) {
    11220 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11221 		return 1;
   11222 	}
   11223 	return 0;
   11224 }
   11225 
   11226 /*
   11227  * wm_nvm_read_spi:
   11228  *
    11229  *	Read a word from the EEPROM using the SPI protocol.
   11230  */
   11231 static int
   11232 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11233 {
   11234 	uint32_t reg, val;
   11235 	int i;
   11236 	uint8_t opc;
   11237 
   11238 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11239 		device_xname(sc->sc_dev), __func__));
   11240 
   11241 	/* Clear SK and CS. */
   11242 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11243 	CSR_WRITE(sc, WMREG_EECD, reg);
   11244 	CSR_WRITE_FLUSH(sc);
   11245 	delay(2);
   11246 
   11247 	if (wm_nvm_ready_spi(sc))
   11248 		return 1;
   11249 
   11250 	/* Toggle CS to flush commands. */
   11251 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11252 	CSR_WRITE_FLUSH(sc);
   11253 	delay(2);
   11254 	CSR_WRITE(sc, WMREG_EECD, reg);
   11255 	CSR_WRITE_FLUSH(sc);
   11256 	delay(2);
   11257 
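          	/*
          	 * Parts with 8 address bits encode the ninth address bit (word
          	 * offsets >= 128, i.e. byte addresses >= 256) in the opcode.
          	 */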
   11258 	opc = SPI_OPC_READ;
   11259 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11260 		opc |= SPI_OPC_A8;
   11261 
   11262 	wm_eeprom_sendbits(sc, opc, 8);
   11263 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11264 
   11265 	for (i = 0; i < wordcnt; i++) {
   11266 		wm_eeprom_recvbits(sc, &val, 16);
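          		/* The EEPROM returns each word most-significant byte first */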
   11267 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11268 	}
   11269 
   11270 	/* Raise CS and clear SK. */
   11271 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11272 	CSR_WRITE(sc, WMREG_EECD, reg);
   11273 	CSR_WRITE_FLUSH(sc);
   11274 	delay(2);
   11275 
   11276 	return 0;
   11277 }
   11278 
   11279 /* Using with EERD */
   11280 
   11281 static int
   11282 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11283 {
   11284 	uint32_t attempts = 100000;
   11285 	uint32_t i, reg = 0;
   11286 	int32_t done = -1;
   11287 
   11288 	for (i = 0; i < attempts; i++) {
   11289 		reg = CSR_READ(sc, rw);
   11290 
   11291 		if (reg & EERD_DONE) {
   11292 			done = 0;
   11293 			break;
   11294 		}
   11295 		delay(5);
   11296 	}
   11297 
   11298 	return done;
   11299 }
   11300 
   11301 static int
   11302 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11303     uint16_t *data)
   11304 {
   11305 	int i, eerd = 0;
   11306 	int error = 0;
   11307 
   11308 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11309 		device_xname(sc->sc_dev), __func__));
   11310 
   11311 	for (i = 0; i < wordcnt; i++) {
   11312 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11313 
   11314 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11315 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11316 		if (error != 0)
   11317 			break;
   11318 
   11319 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11320 	}
   11321 
   11322 	return error;
   11323 }
   11324 
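/*
 * Usage sketch for the EERD path above: a single-word read programs the
 * address and START bit, polls for DONE, then extracts the data field.
 * Hedged example (word 0x10 and the "word" variable are hypothetical):
 */
#if 0
	uint16_t word = 0;

	CSR_WRITE(sc, WMREG_EERD, (0x10 << EERD_ADDR_SHIFT) | EERD_START);
	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) == 0)
		word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
#endif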
   11325 /* Flash */
   11326 
   11327 static int
   11328 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11329 {
   11330 	uint32_t eecd;
   11331 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11332 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11333 	uint8_t sig_byte = 0;
   11334 
   11335 	switch (sc->sc_type) {
   11336 	case WM_T_PCH_SPT:
   11337 		/*
   11338 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11339 		 * sector valid bits from the NVM.
   11340 		 */
   11341 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11342 		if ((*bank == 0) || (*bank == 1)) {
   11343 			aprint_error_dev(sc->sc_dev,
   11344 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11345 				*bank);
   11346 			return -1;
   11347 		} else {
   11348 			*bank = *bank - 2;
   11349 			return 0;
   11350 		}
   11351 	case WM_T_ICH8:
   11352 	case WM_T_ICH9:
   11353 		eecd = CSR_READ(sc, WMREG_EECD);
   11354 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11355 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11356 			return 0;
   11357 		}
   11358 		/* FALLTHROUGH */
   11359 	default:
   11360 		/* Default to 0 */
   11361 		*bank = 0;
   11362 
   11363 		/* Check bank 0 */
   11364 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11365 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11366 			*bank = 0;
   11367 			return 0;
   11368 		}
   11369 
   11370 		/* Check bank 1 */
   11371 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11372 		    &sig_byte);
   11373 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11374 			*bank = 1;
   11375 			return 0;
   11376 		}
   11377 	}
   11378 
   11379 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11380 		device_xname(sc->sc_dev)));
   11381 	return -1;
   11382 }
   11383 
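/*
 * Worked example of the signature offsets used above: the signature
 * lives in the upper byte of word ICH_NVM_SIG_WORD, hence the "* 2 + 1"
 * byte offset, and bank 1's copy sits one flash bank (in bytes) further
 * on.  Sketch with a hypothetical bank size:
 */
#if 0
	uint32_t bank_words = 0x1000;	/* hypothetical bank size in words */
	uint32_t sig0 = ICH_NVM_SIG_WORD * 2 + 1;
	uint32_t sig1 = sig0 + bank_words * sizeof(uint16_t);
	uint8_t sig;

	wm_read_ich8_byte(sc, sig0, &sig);	/* bank 0 signature byte */
	wm_read_ich8_byte(sc, sig1, &sig);	/* bank 1 signature byte */
#endif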
   11384 /******************************************************************************
   11385  * This function does initial flash setup so that a new read/write/erase cycle
   11386  * can be started.
   11387  *
   11388  * sc - The pointer to the hw structure
   11389  ****************************************************************************/
   11390 static int32_t
   11391 wm_ich8_cycle_init(struct wm_softc *sc)
   11392 {
   11393 	uint16_t hsfsts;
   11394 	int32_t error = 1;
   11395 	int32_t i     = 0;
   11396 
   11397 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11398 
   11399 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   11400 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11401 		return error;
   11402 	}
   11403 
   11404 	/* Clear FCERR in Hw status by writing 1 */
   11405 	/* Clear DAEL in Hw status by writing a 1 */
   11406 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11407 
   11408 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11409 
   11410 	/*
   11411 	 * Either the hardware should provide a cycle-in-progress bit to
   11412 	 * check against before starting a new cycle, or the FDONE bit
   11413 	 * should be set to 1 after a hardware reset so that it can be
   11414 	 * used to tell whether a cycle is in progress or has completed.
   11415 	 * There should also be a software semaphore mechanism guarding
   11416 	 * FDONE or the cycle-in-progress bit so that accesses by two
   11417 	 * threads are serialized, and so that two threads cannot start
   11418 	 * a cycle at the same time.
   11419 	 */
   11420 
   11421 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11422 		/*
   11423 		 * There is no cycle running at present, so we can start a
   11424 		 * cycle
   11425 		 */
   11426 
   11427 		/* Begin by setting Flash Cycle Done. */
   11428 		hsfsts |= HSFSTS_DONE;
   11429 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11430 		error = 0;
   11431 	} else {
   11432 		/*
   11433 		 * Otherwise poll for a while so the current cycle has a
   11434 		 * chance to end before giving up.
   11435 		 */
   11436 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11437 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11438 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11439 				error = 0;
   11440 				break;
   11441 			}
   11442 			delay(1);
   11443 		}
   11444 		if (error == 0) {
   11445 			/*
   11446 			 * The previous cycle ended within the timeout;
   11447 			 * now set the Flash Cycle Done bit.
   11448 			 */
   11449 			hsfsts |= HSFSTS_DONE;
   11450 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11451 		}
   11452 	}
   11453 	return error;
   11454 }
   11455 
   11456 /******************************************************************************
   11457  * This function starts a flash cycle and waits for its completion
   11458  *
   11459  * sc - The pointer to the hw structure
   11460  ****************************************************************************/
   11461 static int32_t
   11462 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11463 {
   11464 	uint16_t hsflctl;
   11465 	uint16_t hsfsts;
   11466 	int32_t error = 1;
   11467 	uint32_t i = 0;
   11468 
   11469 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11470 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11471 	hsflctl |= HSFCTL_GO;
   11472 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11473 
   11474 	/* Wait till FDONE bit is set to 1 */
   11475 	do {
   11476 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11477 		if (hsfsts & HSFSTS_DONE)
   11478 			break;
   11479 		delay(1);
   11480 		i++;
   11481 	} while (i < timeout);
   11482 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11483 		error = 0;
   11484 
   11485 	return error;
   11486 }
   11487 
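/*
 * The two helpers above are always used as a pair: wm_ich8_cycle_init()
 * clears stale error bits and claims the cycle, and, once HSFCTL and
 * FADDR have been programmed, wm_ich8_flash_cycle() kicks the cycle off
 * and waits for FDONE.  Hedged sketch of that pairing:
 */
#if 0
	int32_t error;

	if (wm_ich8_cycle_init(sc) == 0) {
		/* ... program HSFCTL and FADDR here ... */
		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
	}
#endif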
   11488 /******************************************************************************
   11489  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11490  *
   11491  * sc - The pointer to the hw structure
   11492  * index - The index of the byte or word to read.
   11493  * size - Size of data to read, 1=byte 2=word, 4=dword
   11494  * data - Pointer to the word to store the value read.
   11495  *****************************************************************************/
   11496 static int32_t
   11497 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11498     uint32_t size, uint32_t *data)
   11499 {
   11500 	uint16_t hsfsts;
   11501 	uint16_t hsflctl;
   11502 	uint32_t flash_linear_address;
   11503 	uint32_t flash_data = 0;
   11504 	int32_t error = 1;
   11505 	int32_t count = 0;
   11506 
   11507 	if (size < 1 || size > 4 || data == NULL ||
   11508 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11509 		return error;
   11510 
   11511 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11512 	    sc->sc_ich8_flash_base;
   11513 
   11514 	do {
   11515 		delay(1);
   11516 		/* Steps */
   11517 		error = wm_ich8_cycle_init(sc);
   11518 		if (error)
   11519 			break;
   11520 
   11521 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11522 		/* The BCOUNT field encodes size - 1 (0 = 1 byte ... 3 = 4 bytes) */
   11523 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11524 		    & HSFCTL_BCOUNT_MASK;
   11525 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11526 		if (sc->sc_type == WM_T_PCH_SPT) {
   11527 			/*
   11528 			 * In SPT, this register is in LAN memory space, not
   11529 			 * flash. Therefore, only 32 bit access is supported.
   11530 			 */
   11531 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11532 			    (uint32_t)hsflctl);
   11533 		} else
   11534 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11535 
   11536 		/*
   11537 		 * Write the last 24 bits of index into Flash Linear address
   11538 		 * field in Flash Address
   11539 		 */
   11540 		/* TODO: maybe check the index against the size of the flash */
   11541 
   11542 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11543 
   11544 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11545 
   11546 		/*
   11547 		 * If FCERR is set to 1, clear it and retry the whole
   11548 		 * sequence a few more times; otherwise read the result
   11549 		 * out of the Flash Data0 register, least significant
   11550 		 * byte first.
   11551 		 */
   11552 		if (error == 0) {
   11553 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11554 			if (size == 1)
   11555 				*data = (uint8_t)(flash_data & 0x000000FF);
   11556 			else if (size == 2)
   11557 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11558 			else if (size == 4)
   11559 				*data = (uint32_t)flash_data;
   11560 			break;
   11561 		} else {
   11562 			/*
   11563 			 * If we've gotten here, then things are probably
   11564 			 * completely hosed, but if the error condition is
   11565 			 * detected, it won't hurt to give it another try...
   11566 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11567 			 */
   11568 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11569 			if (hsfsts & HSFSTS_ERR) {
   11570 				/* Repeat for some time before giving up. */
   11571 				continue;
   11572 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11573 				break;
   11574 		}
   11575 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11576 
   11577 	return error;
   11578 }
   11579 
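/*
 * Example of the BCOUNT encoding programmed above: the field holds
 * size - 1, so a word read writes 1 and a dword read writes 3.
 * Illustrative sketch for a 2-byte read:
 */
#if 0
	uint16_t ctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);

	ctl |= ((2 - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
	ctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, ctl);
#endif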
   11580 /******************************************************************************
   11581  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11582  *
   11583  * sc - pointer to wm_hw structure
   11584  * index - The index of the byte to read.
   11585  * data - Pointer to a byte to store the value read.
   11586  *****************************************************************************/
   11587 static int32_t
   11588 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11589 {
   11590 	int32_t status;
   11591 	uint32_t word = 0;
   11592 
   11593 	status = wm_read_ich8_data(sc, index, 1, &word);
   11594 	if (status == 0)
   11595 		*data = (uint8_t)word;
   11596 	else
   11597 		*data = 0;
   11598 
   11599 	return status;
   11600 }
   11601 
   11602 /******************************************************************************
   11603  * Reads a word from the NVM using the ICH8 flash access registers.
   11604  *
   11605  * sc - pointer to wm_hw structure
   11606  * index - The starting byte index of the word to read.
   11607  * data - Pointer to a word to store the value read.
   11608  *****************************************************************************/
   11609 static int32_t
   11610 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11611 {
   11612 	int32_t status;
   11613 	uint32_t word = 0;
   11614 
   11615 	status = wm_read_ich8_data(sc, index, 2, &word);
   11616 	if (status == 0)
   11617 		*data = (uint16_t)word;
   11618 	else
   11619 		*data = 0;
   11620 
   11621 	return status;
   11622 }
   11623 
   11624 /******************************************************************************
   11625  * Reads a dword from the NVM using the ICH8 flash access registers.
   11626  *
   11627  * sc - pointer to wm_hw structure
   11628  * index - The starting byte index of the word to read.
   11629  * data - Pointer to a word to store the value read.
   11630  *****************************************************************************/
   11631 static int32_t
   11632 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11633 {
   11634 	int32_t status;
   11635 
   11636 	status = wm_read_ich8_data(sc, index, 4, data);
   11637 	return status;
   11638 }
   11639 
   11640 /******************************************************************************
   11641  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11642  * register.
   11643  *
   11644  * sc - Struct containing variables accessed by shared code
   11645  * offset - offset of word in the EEPROM to read
   11646  * data - word read from the EEPROM
   11647  * words - number of words to read
   11648  *****************************************************************************/
   11649 static int
   11650 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11651 {
   11652 	int32_t  error = 0;
   11653 	uint32_t flash_bank = 0;
   11654 	uint32_t act_offset = 0;
   11655 	uint32_t bank_offset = 0;
   11656 	uint16_t word = 0;
   11657 	uint16_t i = 0;
   11658 
   11659 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11660 		device_xname(sc->sc_dev), __func__));
   11661 
   11662 	/*
   11663 	 * We need to know which is the valid flash bank.  In the event
   11664 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11665 	 * managing flash_bank.  So it cannot be trusted and needs
   11666 	 * to be updated with each read.
   11667 	 */
   11668 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11669 	if (error) {
   11670 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11671 			device_xname(sc->sc_dev)));
   11672 		flash_bank = 0;
   11673 	}
   11674 
   11675 	/*
   11676 	 * Adjust the offset if we're on bank 1; the bank size is counted
   11677 	 * in words, hence the conversion to a byte offset below.
   11678 	 */
   11679 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11680 
   11681 	error = wm_get_swfwhw_semaphore(sc);
   11682 	if (error) {
   11683 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11684 		    __func__);
   11685 		return error;
   11686 	}
   11687 
   11688 	for (i = 0; i < words; i++) {
   11689 		/* The NVM part needs a byte offset, hence * 2 */
   11690 		act_offset = bank_offset + ((offset + i) * 2);
   11691 		error = wm_read_ich8_word(sc, act_offset, &word);
   11692 		if (error) {
   11693 			aprint_error_dev(sc->sc_dev,
   11694 			    "%s: failed to read NVM\n", __func__);
   11695 			break;
   11696 		}
   11697 		data[i] = word;
   11698 	}
   11699 
   11700 	wm_put_swfwhw_semaphore(sc);
   11701 	return error;
   11702 }
   11703 
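/*
 * Worked example of the offset arithmetic above, assuming a
 * hypothetical bank size of 0x1000 words: with flash_bank == 1 the
 * bank offset is 0x2000 bytes, so NVM word 3 is fetched from byte
 * offset 0x2006.
 */
#if 0
	uint32_t bank_offset = 1 * (0x1000 * 2);	/* 0x2000 */
	uint32_t act_offset = bank_offset + (3 * 2);	/* 0x2006 */
#endif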
   11704 /******************************************************************************
   11705  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11706  * register.
   11707  *
   11708  * sc - Struct containing variables accessed by shared code
   11709  * offset - offset of word in the EEPROM to read
   11710  * data - word read from the EEPROM
   11711  * words - number of words to read
   11712  *****************************************************************************/
   11713 static int
   11714 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11715 {
   11716 	int32_t  error = 0;
   11717 	uint32_t flash_bank = 0;
   11718 	uint32_t act_offset = 0;
   11719 	uint32_t bank_offset = 0;
   11720 	uint32_t dword = 0;
   11721 	uint16_t i = 0;
   11722 
   11723 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11724 		device_xname(sc->sc_dev), __func__));
   11725 
   11726 	/*
   11727 	 * We need to know which is the valid flash bank.  In the event
   11728 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11729 	 * managing flash_bank.  So it cannot be trusted and needs
   11730 	 * to be updated with each read.
   11731 	 */
   11732 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11733 	if (error) {
   11734 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11735 			device_xname(sc->sc_dev)));
   11736 		flash_bank = 0;
   11737 	}
   11738 
   11739 	/*
   11740 	 * Adjust the offset if we're on bank 1; the bank size is counted
   11741 	 * in words, hence the conversion to a byte offset below.
   11742 	 */
   11743 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11744 
   11745 	error = wm_get_swfwhw_semaphore(sc);
   11746 	if (error) {
   11747 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11748 		    __func__);
   11749 		return error;
   11750 	}
   11751 
   11752 	for (i = 0; i < words; i++) {
   11753 		/* The NVM part needs a byte offset, hence * 2 */
   11754 		act_offset = bank_offset + ((offset + i) * 2);
   11755 		/* but we must read dword aligned, so mask ... */
   11756 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11757 		if (error) {
   11758 			aprint_error_dev(sc->sc_dev,
   11759 			    "%s: failed to read NVM\n", __func__);
   11760 			break;
   11761 		}
   11762 		/* ... and pick out low or high word */
   11763 		if ((act_offset & 0x2) == 0)
   11764 			data[i] = (uint16_t)(dword & 0xFFFF);
   11765 		else
   11766 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11767 	}
   11768 
   11769 	wm_put_swfwhw_semaphore(sc);
   11770 	return error;
   11771 }
   11772 
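/*
 * Worked example of the alignment logic above: for NVM word 3 in
 * bank 0 the byte offset is 6, the dword is read from offset 4, and
 * since (6 & 0x2) != 0 the wanted word is in the high 16 bits.
 * Illustrative sketch:
 */
#if 0
	uint32_t dword;
	uint16_t word;

	wm_read_ich8_dword(sc, 6 & ~0x3, &dword);	/* reads offset 4 */
	word = (uint16_t)((dword >> 16) & 0xFFFF);	/* high half */
#endif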
   11773 /* iNVM */
   11774 
   11775 static int
   11776 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11777 {
   11778 	int32_t  rv = 0;
   11779 	uint32_t invm_dword;
   11780 	uint16_t i;
   11781 	uint8_t record_type, word_address;
   11782 
   11783 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11784 		device_xname(sc->sc_dev), __func__));
   11785 
   11786 	for (i = 0; i < INVM_SIZE; i++) {
   11787 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11788 		/* Get record type */
   11789 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11790 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11791 			break;
   11792 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11793 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11794 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11795 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11796 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11797 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11798 			if (word_address == address) {
   11799 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11800 				rv = 0;
   11801 				break;
   11802 			}
   11803 		}
   11804 	}
   11805 
   11806 	return rv;
   11807 }
   11808 
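/*
 * Each iNVM dword is self-describing: the record type says how many
 * following dwords belong to the record, and a WORD_AUTOLOAD record
 * packs both the word address and the word data into the dword itself.
 * Hedged decoding sketch for a single dword:
 */
#if 0
	uint32_t dw = CSR_READ(sc, WM_INVM_DATA_REG(0));

	if (INVM_DWORD_TO_RECORD_TYPE(dw) == INVM_WORD_AUTOLOAD_STRUCTURE) {
		uint8_t addr = INVM_DWORD_TO_WORD_ADDRESS(dw);
		uint16_t val = INVM_DWORD_TO_WORD_DATA(dw);
	}
#endif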
   11809 static int
   11810 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11811 {
   11812 	int rv = 0;
   11813 	int i;
   11814 
   11815 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11816 		device_xname(sc->sc_dev), __func__));
   11817 
   11818 	for (i = 0; i < words; i++) {
   11819 		switch (offset + i) {
   11820 		case NVM_OFF_MACADDR:
   11821 		case NVM_OFF_MACADDR1:
   11822 		case NVM_OFF_MACADDR2:
   11823 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11824 			if (rv != 0) {
   11825 				data[i] = 0xffff;
   11826 				rv = -1;
   11827 			}
   11828 			break;
   11829 		case NVM_OFF_CFG2:
   11830 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11831 			if (rv != 0) {
   11832 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11833 				rv = 0;
   11834 			}
   11835 			break;
   11836 		case NVM_OFF_CFG4:
   11837 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11838 			if (rv != 0) {
   11839 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11840 				rv = 0;
   11841 			}
   11842 			break;
   11843 		case NVM_OFF_LED_1_CFG:
   11844 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11845 			if (rv != 0) {
   11846 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11847 				rv = 0;
   11848 			}
   11849 			break;
   11850 		case NVM_OFF_LED_0_2_CFG:
   11851 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11852 			if (rv != 0) {
   11853 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11854 				rv = 0;
   11855 			}
   11856 			break;
   11857 		case NVM_OFF_ID_LED_SETTINGS:
   11858 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11859 			if (rv != 0) {
   11860 				*data = ID_LED_RESERVED_FFFF;
   11861 				rv = 0;
   11862 			}
   11863 			break;
   11864 		default:
   11865 			DPRINTF(WM_DEBUG_NVM,
   11866 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11867 			*data = NVM_RESERVED_WORD;
   11868 			break;
   11869 		}
   11870 	}
   11871 
   11872 	return rv;
   11873 }
   11874 
   11875 /* Lock, detecting NVM type, validate checksum, version and read */
   11876 
   11877 /*
   11878  * wm_nvm_acquire:
   11879  *
   11880  *	Perform the EEPROM handshake required on some chips.
   11881  */
   11882 static int
   11883 wm_nvm_acquire(struct wm_softc *sc)
   11884 {
   11885 	uint32_t reg;
   11886 	int x;
   11887 	int ret = 0;
   11888 
   11889 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11890 		device_xname(sc->sc_dev), __func__));
   11891 
   11892 	if (sc->sc_type >= WM_T_ICH8) {
   11893 		ret = wm_get_nvm_ich8lan(sc);
   11894 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11895 		ret = wm_get_swfwhw_semaphore(sc);
   11896 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11897 		/* This will also do wm_get_swsm_semaphore() if needed */
   11898 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11899 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11900 		ret = wm_get_swsm_semaphore(sc);
   11901 	}
   11902 
   11903 	if (ret) {
   11904 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11905 			__func__);
   11906 		return 1;
   11907 	}
   11908 
   11909 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11910 		reg = CSR_READ(sc, WMREG_EECD);
   11911 
   11912 		/* Request EEPROM access. */
   11913 		reg |= EECD_EE_REQ;
   11914 		CSR_WRITE(sc, WMREG_EECD, reg);
   11915 
   11916 		/* ...and wait for it to be granted. */
   11917 		for (x = 0; x < 1000; x++) {
   11918 			reg = CSR_READ(sc, WMREG_EECD);
   11919 			if (reg & EECD_EE_GNT)
   11920 				break;
   11921 			delay(5);
   11922 		}
   11923 		if ((reg & EECD_EE_GNT) == 0) {
   11924 			aprint_error_dev(sc->sc_dev,
   11925 			    "could not acquire EEPROM GNT\n");
   11926 			reg &= ~EECD_EE_REQ;
   11927 			CSR_WRITE(sc, WMREG_EECD, reg);
   11928 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11929 				wm_put_swfwhw_semaphore(sc);
   11930 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11931 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11932 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11933 				wm_put_swsm_semaphore(sc);
   11934 			return 1;
   11935 		}
   11936 	}
   11937 
   11938 	return 0;
   11939 }
   11940 
   11941 /*
   11942  * wm_nvm_release:
   11943  *
   11944  *	Release the EEPROM mutex.
   11945  */
   11946 static void
   11947 wm_nvm_release(struct wm_softc *sc)
   11948 {
   11949 	uint32_t reg;
   11950 
   11951 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11952 		device_xname(sc->sc_dev), __func__));
   11953 
   11954 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11955 		reg = CSR_READ(sc, WMREG_EECD);
   11956 		reg &= ~EECD_EE_REQ;
   11957 		CSR_WRITE(sc, WMREG_EECD, reg);
   11958 	}
   11959 
   11960 	if (sc->sc_type >= WM_T_ICH8) {
   11961 		wm_put_nvm_ich8lan(sc);
   11962 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11963 		wm_put_swfwhw_semaphore(sc);
   11964 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11965 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11966 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11967 		wm_put_swsm_semaphore(sc);
   11968 }
   11969 
   11970 static int
   11971 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11972 {
   11973 	uint32_t eecd = 0;
   11974 
   11975 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11976 	    || sc->sc_type == WM_T_82583) {
   11977 		eecd = CSR_READ(sc, WMREG_EECD);
   11978 
   11979 		/* Isolate bits 15 & 16 */
   11980 		eecd = ((eecd >> 15) & 0x03);
   11981 
   11982 		/* If both bits are set, device is Flash type */
   11983 		if (eecd == 0x03)
   11984 			return 0;
   11985 	}
   11986 	return 1;
   11987 }
   11988 
   11989 static int
   11990 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11991 {
   11992 	uint32_t eec;
   11993 
   11994 	eec = CSR_READ(sc, WMREG_EEC);
   11995 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11996 		return 1;
   11997 
   11998 	return 0;
   11999 }
   12000 
   12001 /*
   12002  * wm_nvm_validate_checksum
   12003  *
   12004  * The sum of the first 64 (16 bit) words must equal NVM_CHECKSUM.
   12005  */
   12006 static int
   12007 wm_nvm_validate_checksum(struct wm_softc *sc)
   12008 {
   12009 	uint16_t checksum;
   12010 	uint16_t eeprom_data;
   12011 #ifdef WM_DEBUG
   12012 	uint16_t csum_wordaddr, valid_checksum;
   12013 #endif
   12014 	int i;
   12015 
   12016 	checksum = 0;
   12017 
   12018 	/* Don't check for I211 */
   12019 	if (sc->sc_type == WM_T_I211)
   12020 		return 0;
   12021 
   12022 #ifdef WM_DEBUG
   12023 	if (sc->sc_type == WM_T_PCH_LPT) {
   12024 		csum_wordaddr = NVM_OFF_COMPAT;
   12025 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12026 	} else {
   12027 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12028 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12029 	}
   12030 
   12031 	/* Dump EEPROM image for debug */
   12032 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12033 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12034 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12035 		/* XXX PCH_SPT? */
   12036 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12037 		if ((eeprom_data & valid_checksum) == 0) {
   12038 			DPRINTF(WM_DEBUG_NVM,
   12039 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12040 				device_xname(sc->sc_dev), eeprom_data,
   12041 				    valid_checksum));
   12042 		}
   12043 	}
   12044 
   12045 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12046 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12047 		for (i = 0; i < NVM_SIZE; i++) {
   12048 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12049 				printf("XXXX ");
   12050 			else
   12051 				printf("%04hx ", eeprom_data);
   12052 			if (i % 8 == 7)
   12053 				printf("\n");
   12054 		}
   12055 	}
   12056 
   12057 #endif /* WM_DEBUG */
   12058 
   12059 	for (i = 0; i < NVM_SIZE; i++) {
   12060 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12061 			return 1;
   12062 		checksum += eeprom_data;
   12063 	}
   12064 
   12065 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12066 #ifdef WM_DEBUG
   12067 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12068 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12069 #endif
   12070 	}
   12071 
   12072 	return 0;
   12073 }
   12074 
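/*
 * Example of the checksum rule checked above: the 64 words, including
 * the checksum word itself (conventionally the last of the 64), must
 * sum to NVM_CHECKSUM, so a vendor stores NVM_CHECKSUM minus the sum
 * of the other 63 words.  Hedged sketch of that relationship:
 */
#if 0
	uint16_t sum = 0, word;
	int j;

	for (j = 0; j < NVM_SIZE - 1; j++) {
		wm_nvm_read(sc, j, 1, &word);
		sum += word;
	}
	/* The last word should then be (uint16_t)(NVM_CHECKSUM - sum). */
#endif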
   12075 static void
   12076 wm_nvm_version_invm(struct wm_softc *sc)
   12077 {
   12078 	uint32_t dword;
   12079 
   12080 	/*
   12081 	 * Linux's code to decode the version is very strange, so we don't
   12082 	 * follow that algorithm and just use word 61 as the document
   12083 	 * describes.  Perhaps it's not perfect though...
   12084 	 *
   12085 	 * Example:
   12086 	 *
   12087 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12088 	 */
   12089 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12090 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12091 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12092 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12093 }
   12094 
   12095 static void
   12096 wm_nvm_version(struct wm_softc *sc)
   12097 {
   12098 	uint16_t major, minor, build, patch;
   12099 	uint16_t uid0, uid1;
   12100 	uint16_t nvm_data;
   12101 	uint16_t off;
   12102 	bool check_version = false;
   12103 	bool check_optionrom = false;
   12104 	bool have_build = false;
   12105 
   12106 	/*
   12107 	 * Version format:
   12108 	 *
   12109 	 * XYYZ
   12110 	 * X0YZ
   12111 	 * X0YY
   12112 	 *
   12113 	 * Example:
   12114 	 *
   12115 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12116 	 *	82571	0x50a6	5.10.6?
   12117 	 *	82572	0x506a	5.6.10?
   12118 	 *	82572EI	0x5069	5.6.9?
   12119 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12120 	 *		0x2013	2.1.3?
   12121 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12122 	 */
   12123 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12124 	switch (sc->sc_type) {
   12125 	case WM_T_82571:
   12126 	case WM_T_82572:
   12127 	case WM_T_82574:
   12128 	case WM_T_82583:
   12129 		check_version = true;
   12130 		check_optionrom = true;
   12131 		have_build = true;
   12132 		break;
   12133 	case WM_T_82575:
   12134 	case WM_T_82576:
   12135 	case WM_T_82580:
   12136 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12137 			check_version = true;
   12138 		break;
   12139 	case WM_T_I211:
   12140 		wm_nvm_version_invm(sc);
   12141 		goto printver;
   12142 	case WM_T_I210:
   12143 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12144 			wm_nvm_version_invm(sc);
   12145 			goto printver;
   12146 		}
   12147 		/* FALLTHROUGH */
   12148 	case WM_T_I350:
   12149 	case WM_T_I354:
   12150 		check_version = true;
   12151 		check_optionrom = true;
   12152 		break;
   12153 	default:
   12154 		return;
   12155 	}
   12156 	if (check_version) {
   12157 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12158 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12159 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12160 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12161 			build = nvm_data & NVM_BUILD_MASK;
   12162 			have_build = true;
   12163 		} else
   12164 			minor = nvm_data & 0x00ff;
   12165 
   12166 		/* Treat the minor nibbles as decimal digits */
   12167 		minor = (minor / 16) * 10 + (minor % 16);
   12168 		sc->sc_nvm_ver_major = major;
   12169 		sc->sc_nvm_ver_minor = minor;
   12170 
   12171 printver:
   12172 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12173 		    sc->sc_nvm_ver_minor);
   12174 		if (have_build) {
   12175 			sc->sc_nvm_ver_build = build;
   12176 			aprint_verbose(".%d", build);
   12177 		}
   12178 	}
   12179 	if (check_optionrom) {
   12180 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12181 		/* Option ROM Version */
   12182 		if ((off != 0x0000) && (off != 0xffff)) {
   12183 			off += NVM_COMBO_VER_OFF;
   12184 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12185 			wm_nvm_read(sc, off, 1, &uid0);
   12186 			if ((uid0 != 0) && (uid0 != 0xffff)
   12187 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12188 				/* 16bits */
   12189 				major = uid0 >> 8;
   12190 				build = (uid0 << 8) | (uid1 >> 8);
   12191 				patch = uid1 & 0x00ff;
   12192 				aprint_verbose(", option ROM Version %d.%d.%d",
   12193 				    major, build, patch);
   12194 			}
   12195 		}
   12196 	}
   12197 
   12198 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12199 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12200 }
   12201 
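/*
 * Worked example of the decimal conversion above: 82571 version word
 * 0x50a2 splits into major 0x5, minor 0x0a and build 0x2; the minor
 * nibbles are then treated as decimal digits, giving 5.10.2.
 * Illustrative sketch:
 */
#if 0
	uint16_t word = 0x50a2;
	uint16_t mjr = (word & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT; /* 5 */
	uint16_t mnr = (word & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT; /* 0x0a */
	uint16_t bld = word & NVM_BUILD_MASK;			   /* 2 */

	mnr = (mnr / 16) * 10 + (mnr % 16);			   /* 10 */
#endif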
   12202 /*
   12203  * wm_nvm_read:
   12204  *
   12205  *	Read data from the serial EEPROM.
   12206  */
   12207 static int
   12208 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12209 {
   12210 	int rv;
   12211 
   12212 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12213 		device_xname(sc->sc_dev), __func__));
   12214 
   12215 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12216 		return 1;
   12217 
   12218 	if (wm_nvm_acquire(sc))
   12219 		return 1;
   12220 
   12221 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12222 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12223 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12224 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12225 	else if (sc->sc_type == WM_T_PCH_SPT)
   12226 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12227 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12228 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12229 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12230 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12231 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12232 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12233 	else
   12234 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12235 
   12236 	wm_nvm_release(sc);
   12237 	return rv;
   12238 }
   12239 
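/*
 * Usage sketch: the Ethernet address, for example, occupies three
 * consecutive NVM words starting at NVM_OFF_MACADDR ("myea" is a
 * hypothetical buffer):
 */
#if 0
	uint16_t myea[3];

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
		aprint_error_dev(sc->sc_dev, "unable to read station address\n");
#endif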
   12240 /*
   12241  * Hardware semaphores.
   12242  * Very complex...
   12243  */
   12244 
   12245 static int
   12246 wm_get_null(struct wm_softc *sc)
   12247 {
   12248 
   12249 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12250 		device_xname(sc->sc_dev), __func__));
   12251 	return 0;
   12252 }
   12253 
   12254 static void
   12255 wm_put_null(struct wm_softc *sc)
   12256 {
   12257 
   12258 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12259 		device_xname(sc->sc_dev), __func__));
   12260 	return;
   12261 }
   12262 
   12263 /*
   12264  * Get hardware semaphore.
   12265  * Same as e1000_get_hw_semaphore_generic()
   12266  */
   12267 static int
   12268 wm_get_swsm_semaphore(struct wm_softc *sc)
   12269 {
   12270 	int32_t timeout;
   12271 	uint32_t swsm;
   12272 
   12273 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12274 		device_xname(sc->sc_dev), __func__));
   12275 	KASSERT(sc->sc_nvm_wordsize > 0);
   12276 
   12277 	/* Get the SW semaphore. */
   12278 	timeout = sc->sc_nvm_wordsize + 1;
   12279 	while (timeout) {
   12280 		swsm = CSR_READ(sc, WMREG_SWSM);
   12281 
   12282 		if ((swsm & SWSM_SMBI) == 0)
   12283 			break;
   12284 
   12285 		delay(50);
   12286 		timeout--;
   12287 	}
   12288 
   12289 	if (timeout == 0) {
   12290 		aprint_error_dev(sc->sc_dev,
   12291 		    "could not acquire SWSM SMBI\n");
   12292 		return 1;
   12293 	}
   12294 
   12295 	/* Get the FW semaphore. */
   12296 	timeout = sc->sc_nvm_wordsize + 1;
   12297 	while (timeout) {
   12298 		swsm = CSR_READ(sc, WMREG_SWSM);
   12299 		swsm |= SWSM_SWESMBI;
   12300 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12301 		/* If we managed to set the bit we got the semaphore. */
   12302 		swsm = CSR_READ(sc, WMREG_SWSM);
   12303 		if (swsm & SWSM_SWESMBI)
   12304 			break;
   12305 
   12306 		delay(50);
   12307 		timeout--;
   12308 	}
   12309 
   12310 	if (timeout == 0) {
   12311 		aprint_error_dev(sc->sc_dev,
   12312 		    "could not acquire SWSM SWESMBI\n");
   12313 		/* Release semaphores */
   12314 		wm_put_swsm_semaphore(sc);
   12315 		return 1;
   12316 	}
   12317 	return 0;
   12318 }
   12319 
   12320 /*
   12321  * Put hardware semaphore.
   12322  * Same as e1000_put_hw_semaphore_generic()
   12323  */
   12324 static void
   12325 wm_put_swsm_semaphore(struct wm_softc *sc)
   12326 {
   12327 	uint32_t swsm;
   12328 
   12329 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12330 		device_xname(sc->sc_dev), __func__));
   12331 
   12332 	swsm = CSR_READ(sc, WMREG_SWSM);
   12333 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12334 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12335 }
   12336 
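/*
 * Usage note for the pair above: SMBI serializes software agents and
 * SWESMBI then arbitrates with firmware; both bits are held for the
 * duration of the access and dropped together.  Hedged sketch:
 */
#if 0
	if (wm_get_swsm_semaphore(sc) == 0) {
		/* ... access the shared NVM/PHY resource ... */
		wm_put_swsm_semaphore(sc);
	}
#endif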
   12337 /*
   12338  * Get SW/FW semaphore.
   12339  * Same as e1000_acquire_swfw_sync_82575().
   12340  */
   12341 static int
   12342 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12343 {
   12344 	uint32_t swfw_sync;
   12345 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12346 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12347 	int timeout = 200;
   12348 
   12349 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12350 		device_xname(sc->sc_dev), __func__));
   12351 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12352 
   12353 	for (timeout = 0; timeout < 200; timeout++) {
   12354 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12355 			if (wm_get_swsm_semaphore(sc)) {
   12356 				aprint_error_dev(sc->sc_dev,
   12357 				    "%s: failed to get semaphore\n",
   12358 				    __func__);
   12359 				return 1;
   12360 			}
   12361 		}
   12362 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12363 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12364 			swfw_sync |= swmask;
   12365 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12366 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12367 				wm_put_swsm_semaphore(sc);
   12368 			return 0;
   12369 		}
   12370 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12371 			wm_put_swsm_semaphore(sc);
   12372 		delay(5000);
   12373 	}
   12374 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12375 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12376 	return 1;
   12377 }
   12378 
   12379 static void
   12380 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12381 {
   12382 	uint32_t swfw_sync;
   12383 
   12384 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12385 		device_xname(sc->sc_dev), __func__));
   12386 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12387 
   12388 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12389 		while (wm_get_swsm_semaphore(sc) != 0)
   12390 			continue;
   12391 	}
   12392 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12393 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12394 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12395 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12396 		wm_put_swsm_semaphore(sc);
   12397 }
   12398 
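/*
 * Example of the mask layout used above: a resource mask such as
 * SWFW_EEP_SM appears once shifted into the software field and once
 * into the firmware field, so ownership by either side is visible in
 * SW_FW_SYNC.  Illustrative sketch:
 */
#if 0
	uint32_t sw = SWFW_EEP_SM << SWFW_SOFT_SHIFT;
	uint32_t fw = SWFW_EEP_SM << SWFW_FIRM_SHIFT;
	bool busy = (CSR_READ(sc, WMREG_SW_FW_SYNC) & (sw | fw)) != 0;
#endif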
   12399 static int
   12400 wm_get_phy_82575(struct wm_softc *sc)
   12401 {
   12402 
   12403 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12404 		device_xname(sc->sc_dev), __func__));
   12405 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12406 }
   12407 
   12408 static void
   12409 wm_put_phy_82575(struct wm_softc *sc)
   12410 {
   12411 
   12412 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12413 		device_xname(sc->sc_dev), __func__));
   12414 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12415 }
   12416 
   12417 static int
   12418 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12419 {
   12420 	uint32_t ext_ctrl;
   12421 	int timeout = 200;
   12422 
   12423 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12424 		device_xname(sc->sc_dev), __func__));
   12425 
   12426 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12427 	for (timeout = 0; timeout < 200; timeout++) {
   12428 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12429 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12430 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12431 
   12432 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12433 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12434 			return 0;
   12435 		delay(5000);
   12436 	}
   12437 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12438 	    device_xname(sc->sc_dev), ext_ctrl);
   12439 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12440 	return 1;
   12441 }
   12442 
   12443 static void
   12444 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12445 {
   12446 	uint32_t ext_ctrl;
   12447 
   12448 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12449 		device_xname(sc->sc_dev), __func__));
   12450 
   12451 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12452 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12453 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12454 
   12455 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12456 }
   12457 
   12458 static int
   12459 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12460 {
   12461 	uint32_t ext_ctrl;
   12462 	int timeout;
   12463 
   12464 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12465 		device_xname(sc->sc_dev), __func__));
   12466 	mutex_enter(sc->sc_ich_phymtx);
   12467 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12468 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12469 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12470 			break;
   12471 		delay(1000);
   12472 	}
   12473 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12474 		printf("%s: SW has already locked the resource\n",
   12475 		    device_xname(sc->sc_dev));
   12476 		goto out;
   12477 	}
   12478 
   12479 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12480 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12481 	for (timeout = 0; timeout < 1000; timeout++) {
   12482 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12483 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12484 			break;
   12485 		delay(1000);
   12486 	}
   12487 	if (timeout >= 1000) {
   12488 		printf("%s: failed to acquire semaphore\n",
   12489 		    device_xname(sc->sc_dev));
   12490 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12491 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12492 		goto out;
   12493 	}
   12494 	return 0;
   12495 
   12496 out:
   12497 	mutex_exit(sc->sc_ich_phymtx);
   12498 	return 1;
   12499 }
   12500 
   12501 static void
   12502 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12503 {
   12504 	uint32_t ext_ctrl;
   12505 
   12506 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12507 		device_xname(sc->sc_dev), __func__));
   12508 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12509 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12510 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12511 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12512 	} else {
   12513 		printf("%s: Semaphore unexpectedly released\n",
   12514 		    device_xname(sc->sc_dev));
   12515 	}
   12516 
   12517 	mutex_exit(sc->sc_ich_phymtx);
   12518 }
   12519 
   12520 static int
   12521 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12522 {
   12523 
   12524 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12525 		device_xname(sc->sc_dev), __func__));
   12526 	mutex_enter(sc->sc_ich_nvmmtx);
   12527 
   12528 	return 0;
   12529 }
   12530 
   12531 static void
   12532 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12533 {
   12534 
   12535 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12536 		device_xname(sc->sc_dev), __func__));
   12537 	mutex_exit(sc->sc_ich_nvmmtx);
   12538 }
   12539 
   12540 static int
   12541 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12542 {
   12543 	int i = 0;
   12544 	uint32_t reg;
   12545 
   12546 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12547 		device_xname(sc->sc_dev), __func__));
   12548 
   12549 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12550 	do {
   12551 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12552 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12553 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12554 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12555 			break;
   12556 		delay(2*1000);
   12557 		i++;
   12558 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12559 
   12560 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12561 		wm_put_hw_semaphore_82573(sc);
   12562 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12563 		    device_xname(sc->sc_dev));
   12564 		return -1;
   12565 	}
   12566 
   12567 	return 0;
   12568 }
   12569 
   12570 static void
   12571 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12572 {
   12573 	uint32_t reg;
   12574 
   12575 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12576 		device_xname(sc->sc_dev), __func__));
   12577 
   12578 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12579 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12580 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12581 }
   12582 
   12583 /*
   12584  * Management mode and power management related subroutines.
   12585  * BMC, AMT, suspend/resume and EEE.
   12586  */
   12587 
   12588 #ifdef WM_WOL
   12589 static int
   12590 wm_check_mng_mode(struct wm_softc *sc)
   12591 {
   12592 	int rv;
   12593 
   12594 	switch (sc->sc_type) {
   12595 	case WM_T_ICH8:
   12596 	case WM_T_ICH9:
   12597 	case WM_T_ICH10:
   12598 	case WM_T_PCH:
   12599 	case WM_T_PCH2:
   12600 	case WM_T_PCH_LPT:
   12601 	case WM_T_PCH_SPT:
   12602 		rv = wm_check_mng_mode_ich8lan(sc);
   12603 		break;
   12604 	case WM_T_82574:
   12605 	case WM_T_82583:
   12606 		rv = wm_check_mng_mode_82574(sc);
   12607 		break;
   12608 	case WM_T_82571:
   12609 	case WM_T_82572:
   12610 	case WM_T_82573:
   12611 	case WM_T_80003:
   12612 		rv = wm_check_mng_mode_generic(sc);
   12613 		break;
   12614 	default:
   12615 		/* nothing to do */
   12616 		rv = 0;
   12617 		break;
   12618 	}
   12619 
   12620 	return rv;
   12621 }
   12622 
   12623 static int
   12624 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12625 {
   12626 	uint32_t fwsm;
   12627 
   12628 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12629 
   12630 	if (((fwsm & FWSM_FW_VALID) != 0)
   12631 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12632 		return 1;
   12633 
   12634 	return 0;
   12635 }
   12636 
   12637 static int
   12638 wm_check_mng_mode_82574(struct wm_softc *sc)
   12639 {
   12640 	uint16_t data;
   12641 
   12642 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12643 
   12644 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12645 		return 1;
   12646 
   12647 	return 0;
   12648 }
   12649 
   12650 static int
   12651 wm_check_mng_mode_generic(struct wm_softc *sc)
   12652 {
   12653 	uint32_t fwsm;
   12654 
   12655 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12656 
   12657 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12658 		return 1;
   12659 
   12660 	return 0;
   12661 }
   12662 #endif /* WM_WOL */
   12663 
   12664 static int
   12665 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12666 {
   12667 	uint32_t manc, fwsm, factps;
   12668 
   12669 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12670 		return 0;
   12671 
   12672 	manc = CSR_READ(sc, WMREG_MANC);
   12673 
   12674 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12675 		device_xname(sc->sc_dev), manc));
   12676 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12677 		return 0;
   12678 
   12679 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12680 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12681 		factps = CSR_READ(sc, WMREG_FACTPS);
   12682 		if (((factps & FACTPS_MNGCG) == 0)
   12683 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12684 			return 1;
   12685 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12686 		uint16_t data;
   12687 
   12688 		factps = CSR_READ(sc, WMREG_FACTPS);
   12689 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12690 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12691 			device_xname(sc->sc_dev), factps, data));
   12692 		if (((factps & FACTPS_MNGCG) == 0)
   12693 		    && ((data & NVM_CFG2_MNGM_MASK)
   12694 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12695 			return 1;
   12696 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12697 	    && ((manc & MANC_ASF_EN) == 0))
   12698 		return 1;
   12699 
   12700 	return 0;
   12701 }
   12702 
   12703 static bool
   12704 wm_phy_resetisblocked(struct wm_softc *sc)
   12705 {
   12706 	bool blocked = false;
   12707 	uint32_t reg;
   12708 	int i = 0;
   12709 
   12710 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12711 		device_xname(sc->sc_dev), __func__));
   12712 
   12713 	switch (sc->sc_type) {
   12714 	case WM_T_ICH8:
   12715 	case WM_T_ICH9:
   12716 	case WM_T_ICH10:
   12717 	case WM_T_PCH:
   12718 	case WM_T_PCH2:
   12719 	case WM_T_PCH_LPT:
   12720 	case WM_T_PCH_SPT:
   12721 		do {
   12722 			reg = CSR_READ(sc, WMREG_FWSM);
   12723 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12724 				blocked = true;
   12725 				delay(10*1000);
   12726 				continue;
   12727 			}
   12728 			blocked = false;
   12729 		} while (blocked && (i++ < 30));
   12730 		return blocked;
   12731 		break;
   12732 	case WM_T_82571:
   12733 	case WM_T_82572:
   12734 	case WM_T_82573:
   12735 	case WM_T_82574:
   12736 	case WM_T_82583:
   12737 	case WM_T_80003:
   12738 		reg = CSR_READ(sc, WMREG_MANC);
   12739 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12740 			return true;
   12741 		else
   12742 			return false;
   12743 		break;
   12744 	default:
   12745 		/* no problem */
   12746 		break;
   12747 	}
   12748 
   12749 	return false;
   12750 }
   12751 
   12752 static void
   12753 wm_get_hw_control(struct wm_softc *sc)
   12754 {
   12755 	uint32_t reg;
   12756 
   12757 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12758 		device_xname(sc->sc_dev), __func__));
   12759 
   12760 	if (sc->sc_type == WM_T_82573) {
   12761 		reg = CSR_READ(sc, WMREG_SWSM);
   12762 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12763 	} else if (sc->sc_type >= WM_T_82571) {
   12764 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12765 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12766 	}
   12767 }
   12768 
   12769 static void
   12770 wm_release_hw_control(struct wm_softc *sc)
   12771 {
   12772 	uint32_t reg;
   12773 
   12774 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12775 		device_xname(sc->sc_dev), __func__));
   12776 
   12777 	if (sc->sc_type == WM_T_82573) {
   12778 		reg = CSR_READ(sc, WMREG_SWSM);
   12779 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12780 	} else if (sc->sc_type >= WM_T_82571) {
   12781 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12782 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12783 	}
   12784 }
   12785 
   12786 static void
   12787 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12788 {
   12789 	uint32_t reg;
   12790 
   12791 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12792 		device_xname(sc->sc_dev), __func__));
   12793 
   12794 	if (sc->sc_type < WM_T_PCH2)
   12795 		return;
   12796 
   12797 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12798 
   12799 	if (gate)
   12800 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12801 	else
   12802 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12803 
   12804 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12805 }
   12806 
   12807 static void
   12808 wm_smbustopci(struct wm_softc *sc)
   12809 {
   12810 	uint32_t fwsm, reg;
   12811 	int rv = 0;
   12812 
   12813 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12814 		device_xname(sc->sc_dev), __func__));
   12815 
   12816 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12817 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12818 
   12819 	/* Disable ULP */
   12820 	wm_ulp_disable(sc);
   12821 
   12822 	/* Acquire PHY semaphore */
   12823 	sc->phy.acquire(sc);
   12824 
   12825 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12826 	switch (sc->sc_type) {
   12827 	case WM_T_PCH_LPT:
   12828 	case WM_T_PCH_SPT:
   12829 		if (wm_phy_is_accessible_pchlan(sc))
   12830 			break;
   12831 
   12832 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12833 		reg |= CTRL_EXT_FORCE_SMBUS;
   12834 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12835 #if 0
   12836 		/* XXX Isn't this required??? */
   12837 		CSR_WRITE_FLUSH(sc);
   12838 #endif
   12839 		delay(50 * 1000);
   12840 		/* FALLTHROUGH */
   12841 	case WM_T_PCH2:
   12842 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12843 			break;
   12844 		/* FALLTHROUGH */
   12845 	case WM_T_PCH:
   12846 		if (sc->sc_type == WM_T_PCH)
   12847 			if ((fwsm & FWSM_FW_VALID) != 0)
   12848 				break;
   12849 
   12850 		if (wm_phy_resetisblocked(sc) == true) {
   12851 			printf("XXX reset is blocked(3)\n");
   12852 			break;
   12853 		}
   12854 
   12855 		wm_toggle_lanphypc_pch_lpt(sc);
   12856 
   12857 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12858 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12859 				break;
   12860 
   12861 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12862 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12863 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12864 
   12865 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12866 				break;
   12867 			rv = -1;
   12868 		}
   12869 		break;
   12870 	default:
   12871 		break;
   12872 	}
   12873 
   12874 	/* Release semaphore */
   12875 	sc->phy.release(sc);
   12876 
   12877 	if (rv == 0) {
   12878 		if (wm_phy_resetisblocked(sc)) {
   12879 			printf("XXX reset is blocked(4)\n");
   12880 			goto out;
   12881 		}
   12882 		wm_reset_phy(sc);
   12883 		if (wm_phy_resetisblocked(sc))
   12884 			printf("XXX reset is blocked(4)\n");
   12885 	}
   12886 
   12887 out:
   12888 	/*
   12889 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12890 	 */
   12891 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12892 		delay(10*1000);
   12893 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12894 	}
   12895 }
   12896 
   12897 static void
   12898 wm_init_manageability(struct wm_softc *sc)
   12899 {
   12900 
   12901 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12902 		device_xname(sc->sc_dev), __func__));
   12903 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12904 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12905 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12906 
   12907 		/* Disable hardware interception of ARP */
   12908 		manc &= ~MANC_ARP_EN;
   12909 
   12910 		/* Enable receiving management packets to the host */
   12911 		if (sc->sc_type >= WM_T_82571) {
   12912 			manc |= MANC_EN_MNG2HOST;
   12913 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12914 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12915 		}
   12916 
   12917 		CSR_WRITE(sc, WMREG_MANC, manc);
   12918 	}
   12919 }
   12920 
   12921 static void
   12922 wm_release_manageability(struct wm_softc *sc)
   12923 {
   12924 
   12925 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12926 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12927 
   12928 		manc |= MANC_ARP_EN;
   12929 		if (sc->sc_type >= WM_T_82571)
   12930 			manc &= ~MANC_EN_MNG2HOST;
   12931 
   12932 		CSR_WRITE(sc, WMREG_MANC, manc);
   12933 	}
   12934 }
   12935 
   12936 static void
   12937 wm_get_wakeup(struct wm_softc *sc)
   12938 {
   12939 
   12940 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12941 	switch (sc->sc_type) {
   12942 	case WM_T_82573:
   12943 	case WM_T_82583:
   12944 		sc->sc_flags |= WM_F_HAS_AMT;
   12945 		/* FALLTHROUGH */
   12946 	case WM_T_80003:
   12947 	case WM_T_82575:
   12948 	case WM_T_82576:
   12949 	case WM_T_82580:
   12950 	case WM_T_I350:
   12951 	case WM_T_I354:
   12952 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12953 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12954 		/* FALLTHROUGH */
   12955 	case WM_T_82541:
   12956 	case WM_T_82541_2:
   12957 	case WM_T_82547:
   12958 	case WM_T_82547_2:
   12959 	case WM_T_82571:
   12960 	case WM_T_82572:
   12961 	case WM_T_82574:
   12962 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12963 		break;
   12964 	case WM_T_ICH8:
   12965 	case WM_T_ICH9:
   12966 	case WM_T_ICH10:
   12967 	case WM_T_PCH:
   12968 	case WM_T_PCH2:
   12969 	case WM_T_PCH_LPT:
   12970 	case WM_T_PCH_SPT:
   12971 		sc->sc_flags |= WM_F_HAS_AMT;
   12972 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12973 		break;
   12974 	default:
   12975 		break;
   12976 	}
   12977 
   12978 	/* 1: HAS_MANAGE */
   12979 	if (wm_enable_mng_pass_thru(sc) != 0)
   12980 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12981 
   12982 #ifdef WM_DEBUG
   12983 	printf("\n");
   12984 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12985 		printf("HAS_AMT,");
   12986 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12987 		printf("ARC_SUBSYS_VALID,");
   12988 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12989 		printf("ASF_FIRMWARE_PRES,");
   12990 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12991 		printf("HAS_MANAGE,");
   12992 	printf("\n");
   12993 #endif
   12994 	/*
   12995 	 * Note that the WOL flags are set later, after the EEPROM
   12996 	 * state has been reset.
   12997 	 */
   12998 }
   12999 
/*
 * Unconfigure Ultra Low Power mode.
 * Only for I217 and newer (see below).
 */
static void
wm_ulp_disable(struct wm_softc *sc)
{
	uint32_t reg;
	int i = 0;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	/* Exclude old devices */
	if ((sc->sc_type < WM_T_PCH_LPT)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
		return;

	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
		/* Request ME un-configure ULP mode in the PHY */
		reg = CSR_READ(sc, WMREG_H2ME);
		reg &= ~H2ME_ULP;
		reg |= H2ME_ENFORCE_SETTINGS;
		CSR_WRITE(sc, WMREG_H2ME, reg);

		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
			if (i++ == 30) {
				printf("%s timed out\n", __func__);
				return;
			}
			delay(10 * 1000);
		}
		reg = CSR_READ(sc, WMREG_H2ME);
		reg &= ~H2ME_ENFORCE_SETTINGS;
		CSR_WRITE(sc, WMREG_H2ME, reg);

		return;
	}

	/* Acquire semaphore */
	sc->phy.acquire(sc);

	/* Toggle LANPHYPC */
	wm_toggle_lanphypc_pch_lpt(sc);

	/* Unforce SMBus mode in PHY */
	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
	if (reg == 0x0000 || reg == 0xffff) {
		uint32_t reg2;

		printf("%s: Force SMBus first.\n", __func__);
		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
		reg2 |= CTRL_EXT_FORCE_SMBUS;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
		delay(50 * 1000);

		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
	}
	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);

	/* Unforce SMBus mode in MAC */
	reg = CSR_READ(sc, WMREG_CTRL_EXT);
	reg &= ~CTRL_EXT_FORCE_SMBUS;
	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
	reg |= HV_PM_CTRL_K1_ENA;
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);

	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
	reg &= ~(I218_ULP_CONFIG1_IND
	    | I218_ULP_CONFIG1_STICKY_ULP
	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
	    | I218_ULP_CONFIG1_WOL_HOST
	    | I218_ULP_CONFIG1_INBAND_EXIT
	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
	reg |= I218_ULP_CONFIG1_START;
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg &= ~FEXTNVM7_DIS_SMB_PERST;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	/* Release semaphore */
	sc->phy.release(sc);
	wm_gmii_reset(sc);
	delay(50 * 1000);
}

/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}

static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if ((child != NULL) && (child->mii_mpd_rev <= 2))
				/* Magic number: PHY page 768, register 25 */
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444);
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type >= WM_T_PCH)
		wm_enable_phy_wakeup(sc);
	else {
		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}

/* LPLU */

static void
wm_lplu_d0_disable(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
}

static void
wm_lplu_d0_disable_pch(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
	reg |= HV_OEM_BITS_ANEGNOW;
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
}

/* EEE */

static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		ipcnfg &= ~IPCNFG_10BASE_TE;
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
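	/*
	 * The reads below presumably exist to flush the posted writes
	 * above; the XXX comments mark this as unconfirmed.
	 */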
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */

/* Work-around for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
#if 0
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link is other than 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return;
#endif
}

/* WOL from S5 stops working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

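	/*
	 * The fix is simply to pulse the Kumeran near-end loopback
	 * diagnostic bit: set it and write it out, then clear it and
	 * write it out again.  The sequence is carried over from Intel's
	 * reference code; the underlying mechanism is not documented
	 * here.
	 */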
	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH);

	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register
		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
		 */
		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
			PHY_RESET(child);
			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
			    0x3140);
		}
	}

	/* Select page 0 */
	sc->phy.acquire(sc);
	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	sc->phy.release(sc);

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH2);

	wm_set_mdio_slow_mode_hv(sc);
}

static int
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return -1;

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);
	sc->phy.release(sc);

	return 0;
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

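	/*
	 * The block below appears to latch the new K1 setting: briefly
	 * force the MAC speed (CTRL_FRCSPD with CTRL_EXT_SPD_BYPS set),
	 * then restore the original CTRL and CTRL_EXT values.
	 */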
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

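	/*
	 * Each call below presumably writes one 8-bit value at the given
	 * offset within the named controller register; the offset/value
	 * pairs are the untested magic numbers mentioned above.
	 */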
	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

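/* A PHY that does not respond reads back as all-zeros or all-ones. */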
#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	int i;
	uint32_t reg;
	uint16_t id1, id2;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
		if (MII_INVALIDID(id1))
			continue;
		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
		if (MII_INVALIDID(id2))
			continue;
		break;
	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
		sc->phy.acquire(sc);
	}
	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		printf("XXX return with false\n");
		return false;
	}
out:
	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			/* Unforce SMBus mode in PHY */
			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL);
			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, reg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

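	/*
	 * Driving the LANPHYPC value low and releasing it amounts to a
	 * PHY power cycle; the config counter programmed below gives the
	 * PHY 50msec to reload its configuration afterwards.  This is an
	 * interpretation of the reference code, not a documented
	 * contract.
	 */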
	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		pcireg_t preg;
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;

		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
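		/*
		 * An illustrative example of the encoding below: with a
		 * 24KB Rx buffer (rxa = 24), a 1500 byte MTU and a 1Gbps
		 * link, lat_ns = (24 * 1024 - 2 * 1500) * 8 * 1000 / 1000
		 * = 172608.  That does not fit in the 10-bit value field,
		 * so it is divided (rounding up) by 2^5 per scale step:
		 * 172608 -> 5394 (scale 1) -> 169 (scale 2).  The result
		 * is encoded as (2 << 10) | 169, which stands for
		 * 169 * 2^10 ns, i.e. roughly 173 usec.
		 */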
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else {
			uint32_t status;
			uint16_t speed;

			status = CSR_READ(sc, WMREG_STATUS);
			switch (__SHIFTOUT(status, STATUS_SPEED)) {
			case STATUS_SPEED_10:
				speed = 10;
				break;
			case STATUS_SPEED_100:
				speed = 100;
				break;
			case STATUS_SPEED_1000:
				speed = 1000;
				break;
			default:
				printf("%s: Unknown speed (status = %08x)\n",
				    device_xname(sc->sc_dev), status);
				return -1;
			}
			lat_ns /= speed;
		}
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			printf("%s: Invalid LTR latency scale %d\n",
			    device_xname(sc->sc_dev), scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc)
			lat_enc = max_ltr_enc;
	}

	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

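	/*
	 * Shape of the workaround, as implemented below: read the PHY
	 * PLL frequency register and, while it still reads back as
	 * unconfigured, reset the internal PHY, rewrite the iNVM
	 * autoload word with the PLL workaround bits set, bounce the
	 * device through D3hot and back to D0, and retry.
	 */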
	/*
	 * Get Power Management cap offset first; bail out before
	 * touching any registers if it is missing.
	 */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}