/*	$NetBSD: if_wm.c,v 1.498 2017/03/21 10:39:52 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.498 2017/03/21 10:39:52 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

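/*
 * Typical (illustrative) call site for the macro above: the second
 * argument carries its own parentheses, so the entire printf(9)
 * argument list compiles away when WM_DEBUG is not defined:
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: ...\n", device_xname(sc->sc_dev)));
 */
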
#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * Maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

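/*
 * A sketch of the implied MSI-X vector layout (an assumption, but it is
 * what WM_MAX_NINTR == WM_MAX_NQUEUEINTR + 1 suggests): one vector per
 * Tx/Rx queue pair plus one vector for link events, e.g. with 4 queues,
 * vectors 0..3 service the queues and vector 4 services link changes.
 */
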
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

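/*
 * Because WM_NTXDESC(txq) and WM_TXQUEUELEN(txq) are powers of two, the
 * WM_NEXTTX()/WM_NEXTTXS() macros above wrap by masking rather than by
 * a modulo.  A worked example with the 82544 ring size of 4096:
 *
 *	WM_NEXTTX(txq, 4094) == ((4094 + 1) & 4095) == 4095
 *	WM_NEXTTX(txq, 4095) == ((4095 + 1) & 4095) == 0
 */
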
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

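/*
 * Worked example of the offset macros above (assuming the usual 16-byte
 * descriptor, which both the legacy and NEWQUEUE Tx formats use):
 * descriptor 10 of a queue lives at byte offset
 *
 *	WM_CDTXOFF(txq, 10) == 16 * 10 == 160
 *
 * from the start of the DMA'd descriptor block.
 */
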
/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

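/*
 * A sketch of what the macros above generate: WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares the pair
 *
 *	char txq_txdw_evcnt_name[...];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH() then formats the counter name via
 * "%s%02d%s" as, e.g., "txq00txdw" for queue 0 before handing it to
 * evcnt_attach_dynamic(9).
 */
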
struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

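/*
 * A minimal sketch of how the operations vector above is meant to be
 * used (assuming the usual 0-on-success convention of the wm_get_*()
 * semaphore routines, and the usual acquire/release bracketing around
 * PHY register access):
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		... touch PHY registers ...
 *		sc->phy.release(sc);
 *	}
 */
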
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

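/*
 * Note that sc_core_lock may be NULL (the macros above guard for it);
 * in that case WM_CORE_LOCK()/WM_CORE_UNLOCK() degrade to no-ops and
 * WM_CORE_LOCKED() evaluates true, so lock assertions still pass.
 */
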
#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

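/*
 * Illustrative use of the chain macros (a sketch): a jumbo frame that
 * spans three clusters arrives in three descriptors, and the Rx path
 * links the buffers with
 *
 *	WM_RXCHAIN_LINK(rxq, m0);
 *	WM_RXCHAIN_LINK(rxq, m1);
 *	WM_RXCHAIN_LINK(rxq, m2);
 *
 * leaving rxq_head == m0 and rxq_tail == m2; WM_RXCHAIN_RESET() then
 * re-arms the chain for the next packet.
 */
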
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

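/*
 * CSR_WRITE_FLUSH() forces posted PCI writes out to the device: the
 * read of WMREG_STATUS cannot complete until earlier writes have
 * reached the chip.  A sketch of the common idiom (CTRL_RST is assumed
 * here to be the reset bit from if_wmreg.h):
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */
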
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

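/*
 * Example of the HI/LO split above: with a 64-bit bus_addr_t and a
 * descriptor ring whose DMA base is 0x123456000,
 *
 *	WM_CDTXADDR_LO(txq, 0) == 0x23456000
 *	WM_CDTXADDR_HI(txq, 0) == 0x1
 *
 * while with a 32-bit bus_addr_t the HI half is constant-folded to 0.
 */
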
/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1331 	  WM_T_82580,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1334 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1335 	  WM_T_82580,		WMP_F_SERDES },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1338 	  "DH89XXCC 1000BASE-KX Ethernet",
   1339 	  WM_T_82580,		WMP_F_SERDES },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1342 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1343 	  WM_T_82580,		WMP_F_SERDES },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1346 	  "I350 Gigabit Network Connection",
   1347 	  WM_T_I350,		WMP_F_COPPER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1350 	  "I350 Gigabit Fiber Network Connection",
   1351 	  WM_T_I350,		WMP_F_FIBER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1354 	  "I350 Gigabit Backplane Connection",
   1355 	  WM_T_I350,		WMP_F_SERDES },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1358 	  "I350 Quad Port Gigabit Ethernet",
   1359 	  WM_T_I350,		WMP_F_SERDES },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1362 	  "I350 Gigabit Connection",
   1363 	  WM_T_I350,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1366 	  "I354 Gigabit Ethernet (KX)",
   1367 	  WM_T_I354,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1370 	  "I354 Gigabit Ethernet (SGMII)",
   1371 	  WM_T_I354,		WMP_F_COPPER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1374 	  "I354 Gigabit Ethernet (2.5G)",
   1375 	  WM_T_I354,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1378 	  "I210-T1 Ethernet Server Adapter",
   1379 	  WM_T_I210,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1382 	  "I210 Ethernet (Copper OEM)",
   1383 	  WM_T_I210,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1386 	  "I210 Ethernet (Copper IT)",
   1387 	  WM_T_I210,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1390 	  "I210 Ethernet (FLASH less)",
   1391 	  WM_T_I210,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1394 	  "I210 Gigabit Ethernet (Fiber)",
   1395 	  WM_T_I210,		WMP_F_FIBER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1398 	  "I210 Gigabit Ethernet (SERDES)",
   1399 	  WM_T_I210,		WMP_F_SERDES },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1402 	  "I210 Gigabit Ethernet (FLASH less)",
   1403 	  WM_T_I210,		WMP_F_SERDES },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1406 	  "I210 Gigabit Ethernet (SGMII)",
   1407 	  WM_T_I210,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1410 	  "I211 Ethernet (COPPER)",
   1411 	  WM_T_I211,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1413 	  "I217 V Ethernet Connection",
   1414 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1416 	  "I217 LM Ethernet Connection",
   1417 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1419 	  "I218 V Ethernet Connection",
   1420 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1422 	  "I218 V Ethernet Connection",
   1423 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1425 	  "I218 V Ethernet Connection",
   1426 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1428 	  "I218 LM Ethernet Connection",
   1429 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1431 	  "I218 LM Ethernet Connection",
   1432 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1434 	  "I218 LM Ethernet Connection",
   1435 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1436 #if 0
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1438 	  "I219 V Ethernet Connection",
   1439 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1441 	  "I219 V Ethernet Connection",
   1442 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1444 	  "I219 V Ethernet Connection",
   1445 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1447 	  "I219 V Ethernet Connection",
   1448 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1450 	  "I219 LM Ethernet Connection",
   1451 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1453 	  "I219 LM Ethernet Connection",
   1454 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1456 	  "I219 LM Ethernet Connection",
   1457 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1459 	  "I219 LM Ethernet Connection",
   1460 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1462 	  "I219 LM Ethernet Connection",
   1463 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1464 #endif
   1465 	{ 0,			0,
   1466 	  NULL,
   1467 	  0,			0 },
   1468 };
   1469 
   1470 /*
   1471  * Register read/write functions.
   1472  * Other than CSR_{READ|WRITE}().
   1473  */
   1474 
   1475 #if 0 /* Not currently used */
   1476 static inline uint32_t
   1477 wm_io_read(struct wm_softc *sc, int reg)
   1478 {
   1479 
   1480 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1481 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1482 }
   1483 #endif
   1484 
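/*
 * Indirect register access through the I/O BAR: the register offset
 * is written to the 32-bit window at offset 0 and the data to the
 * window at offset 4.  Only used for chip-bug workarounds, never for
 * normal operation.
 */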
   1485 static inline void
   1486 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1487 {
   1488 
   1489 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1490 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1491 }
   1492 
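/*
 * Write an 8-bit value to one of the 82575's indirect 8-bit control
 * registers: the data and the sub-register offset are packed into a
 * single CSR write, then we poll for the READY bit to learn that the
 * hardware has latched the value.
 */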
   1493 static inline void
   1494 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1495     uint32_t data)
   1496 {
   1497 	uint32_t regval;
   1498 	int i;
   1499 
   1500 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1501 
   1502 	CSR_WRITE(sc, reg, regval);
   1503 
   1504 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1505 		delay(5);
   1506 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1507 			break;
   1508 	}
   1509 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1510 		aprint_error("%s: WARNING:"
   1511 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1512 		    device_xname(sc->sc_dev), reg);
   1513 	}
   1514 }
   1515 
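/*
 * Split a bus address into the little-endian low/high halves of a
 * (wiseman) descriptor address; the high half is zero when bus
 * addresses are only 32 bits wide.
 */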
   1516 static inline void
   1517 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1518 {
   1519 	wa->wa_low = htole32(v & 0xffffffffU);
   1520 	if (sizeof(bus_addr_t) == 8)
   1521 		wa->wa_high = htole32((uint64_t) v >> 32);
   1522 	else
   1523 		wa->wa_high = 0;
   1524 }
   1525 
   1526 /*
   1527  * Descriptor sync/init functions.
   1528  */
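
/*
 * Sync "num" tx descriptors starting at "start", issuing two
 * bus_dmamap_sync() calls when the range wraps past the end of the
 * descriptor ring.
 */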
   1529 static inline void
   1530 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1531 {
   1532 	struct wm_softc *sc = txq->txq_sc;
   1533 
   1534 	/* If it will wrap around, sync to the end of the ring. */
   1535 	if ((start + num) > WM_NTXDESC(txq)) {
   1536 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1537 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1538 		    (WM_NTXDESC(txq) - start), ops);
   1539 		num -= (WM_NTXDESC(txq) - start);
   1540 		start = 0;
   1541 	}
   1542 
   1543 	/* Now sync whatever is left. */
   1544 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1545 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1546 }
   1547 
   1548 static inline void
   1549 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1550 {
   1551 	struct wm_softc *sc = rxq->rxq_sc;
   1552 
   1553 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1554 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1555 }
   1556 
   1557 static inline void
   1558 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1559 {
   1560 	struct wm_softc *sc = rxq->rxq_sc;
   1561 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1562 	struct mbuf *m = rxs->rxs_mbuf;
   1563 
   1564 	/*
   1565 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1566 	 * so that the payload after the Ethernet header is aligned
   1567 	 * to a 4-byte boundary.
    1568 	 *
   1569 	 * XXX BRAINDAMAGE ALERT!
   1570 	 * The stupid chip uses the same size for every buffer, which
   1571 	 * is set in the Receive Control register.  We are using the 2K
   1572 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1573 	 * reason, we can't "scoot" packets longer than the standard
   1574 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1575 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1576 	 * the upper layer copy the headers.
   1577 	 */
   1578 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1579 
   1580 	if (sc->sc_type == WM_T_82574) {
   1581 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1582 		rxd->erx_data.erxd_addr =
   1583 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1584 		rxd->erx_data.erxd_dd = 0;
   1585 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1586 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1587 
   1588 		rxd->nqrx_data.nrxd_paddr =
   1589 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
    1590 		/* Currently, header splitting is not supported. */
   1591 		rxd->nqrx_data.nrxd_haddr = 0;
   1592 	} else {
   1593 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1594 
   1595 		wm_set_dma_addr(&rxd->wrx_addr,
   1596 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1597 		rxd->wrx_len = 0;
   1598 		rxd->wrx_cksum = 0;
   1599 		rxd->wrx_status = 0;
   1600 		rxd->wrx_errors = 0;
   1601 		rxd->wrx_special = 0;
   1602 	}
   1603 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1604 
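	/*
	 * Update the receive descriptor tail register so that the
	 * hardware sees the newly initialized descriptor.
	 */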
   1605 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1606 }
   1607 
   1608 /*
   1609  * Device driver interface functions and commonly used functions.
   1610  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1611  */
   1612 
   1613 /* Lookup supported device table */
   1614 static const struct wm_product *
   1615 wm_lookup(const struct pci_attach_args *pa)
   1616 {
   1617 	const struct wm_product *wmp;
   1618 
   1619 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1620 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1621 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1622 			return wmp;
   1623 	}
   1624 	return NULL;
   1625 }
   1626 
   1627 /* The match function (ca_match) */
   1628 static int
   1629 wm_match(device_t parent, cfdata_t cf, void *aux)
   1630 {
   1631 	struct pci_attach_args *pa = aux;
   1632 
   1633 	if (wm_lookup(pa) != NULL)
   1634 		return 1;
   1635 
   1636 	return 0;
   1637 }
   1638 
   1639 /* The attach function (ca_attach) */
   1640 static void
   1641 wm_attach(device_t parent, device_t self, void *aux)
   1642 {
   1643 	struct wm_softc *sc = device_private(self);
   1644 	struct pci_attach_args *pa = aux;
   1645 	prop_dictionary_t dict;
   1646 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1647 	pci_chipset_tag_t pc = pa->pa_pc;
   1648 	int counts[PCI_INTR_TYPE_SIZE];
   1649 	pci_intr_type_t max_type;
   1650 	const char *eetype, *xname;
   1651 	bus_space_tag_t memt;
   1652 	bus_space_handle_t memh;
   1653 	bus_size_t memsize;
   1654 	int memh_valid;
   1655 	int i, error;
   1656 	const struct wm_product *wmp;
   1657 	prop_data_t ea;
   1658 	prop_number_t pn;
   1659 	uint8_t enaddr[ETHER_ADDR_LEN];
   1660 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1661 	pcireg_t preg, memtype;
   1662 	uint16_t eeprom_data, apme_mask;
   1663 	bool force_clear_smbi;
   1664 	uint32_t link_mode;
   1665 	uint32_t reg;
   1666 
   1667 	sc->sc_dev = self;
   1668 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1669 	sc->sc_core_stopping = false;
   1670 
   1671 	wmp = wm_lookup(pa);
   1672 #ifdef DIAGNOSTIC
   1673 	if (wmp == NULL) {
   1674 		printf("\n");
   1675 		panic("wm_attach: impossible");
   1676 	}
   1677 #endif
   1678 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1679 
   1680 	sc->sc_pc = pa->pa_pc;
   1681 	sc->sc_pcitag = pa->pa_tag;
   1682 
   1683 	if (pci_dma64_available(pa))
   1684 		sc->sc_dmat = pa->pa_dmat64;
   1685 	else
   1686 		sc->sc_dmat = pa->pa_dmat;
   1687 
   1688 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1689 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1690 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1691 
   1692 	sc->sc_type = wmp->wmp_type;
   1693 
   1694 	/* Set default function pointers */
   1695 	sc->phy.acquire = wm_get_null;
   1696 	sc->phy.release = wm_put_null;
   1697 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1698 
   1699 	if (sc->sc_type < WM_T_82543) {
   1700 		if (sc->sc_rev < 2) {
   1701 			aprint_error_dev(sc->sc_dev,
   1702 			    "i82542 must be at least rev. 2\n");
   1703 			return;
   1704 		}
   1705 		if (sc->sc_rev < 3)
   1706 			sc->sc_type = WM_T_82542_2_0;
   1707 	}
   1708 
   1709 	/*
   1710 	 * Disable MSI for Errata:
   1711 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1712 	 *
   1713 	 *  82544: Errata 25
   1714 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1715 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1716 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1717 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1718 	 *
   1719 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1720 	 *
   1721 	 *  82571 & 82572: Errata 63
   1722 	 */
   1723 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1724 	    || (sc->sc_type == WM_T_82572))
   1725 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1726 
   1727 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1728 	    || (sc->sc_type == WM_T_82580)
   1729 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1730 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1731 		sc->sc_flags |= WM_F_NEWQUEUE;
   1732 
   1733 	/* Set device properties (mactype) */
   1734 	dict = device_properties(sc->sc_dev);
   1735 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1736 
   1737 	/*
    1738 	 * Map the device.  All devices support memory-mapped access,
   1739 	 * and it is really required for normal operation.
   1740 	 */
   1741 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1742 	switch (memtype) {
   1743 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1744 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1745 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1746 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1747 		break;
   1748 	default:
   1749 		memh_valid = 0;
   1750 		break;
   1751 	}
   1752 
   1753 	if (memh_valid) {
   1754 		sc->sc_st = memt;
   1755 		sc->sc_sh = memh;
   1756 		sc->sc_ss = memsize;
   1757 	} else {
   1758 		aprint_error_dev(sc->sc_dev,
   1759 		    "unable to map device registers\n");
   1760 		return;
   1761 	}
   1762 
   1763 	/*
   1764 	 * In addition, i82544 and later support I/O mapped indirect
   1765 	 * register access.  It is not desirable (nor supported in
   1766 	 * this driver) to use it for normal operation, though it is
   1767 	 * required to work around bugs in some chip versions.
   1768 	 */
   1769 	if (sc->sc_type >= WM_T_82544) {
   1770 		/* First we have to find the I/O BAR. */
   1771 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1772 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1773 			if (memtype == PCI_MAPREG_TYPE_IO)
   1774 				break;
   1775 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1776 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1777 				i += 4;	/* skip high bits, too */
   1778 		}
   1779 		if (i < PCI_MAPREG_END) {
   1780 			/*
    1781 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1782 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1783 			 * That's not a problem, because newer chips don't
    1784 			 * have this bug.
    1785 			 *
    1786 			 * The i8254x apparently doesn't respond when the
    1787 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1788 			 * been configured.
   1789 			 */
   1790 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1791 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1792 				aprint_error_dev(sc->sc_dev,
   1793 				    "WARNING: I/O BAR at zero.\n");
   1794 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1795 					0, &sc->sc_iot, &sc->sc_ioh,
   1796 					NULL, &sc->sc_ios) == 0) {
   1797 				sc->sc_flags |= WM_F_IOH_VALID;
   1798 			} else {
   1799 				aprint_error_dev(sc->sc_dev,
   1800 				    "WARNING: unable to map I/O space\n");
   1801 			}
   1802 		}
   1803 
   1804 	}
   1805 
   1806 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1807 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1808 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1809 	if (sc->sc_type < WM_T_82542_2_1)
   1810 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1811 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1812 
   1813 	/* power up chip */
   1814 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1815 	    NULL)) && error != EOPNOTSUPP) {
   1816 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1817 		return;
   1818 	}
   1819 
   1820 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1821 
   1822 	/* Allocation settings */
   1823 	max_type = PCI_INTR_TYPE_MSIX;
   1824 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1825 	counts[PCI_INTR_TYPE_MSI] = 1;
   1826 	counts[PCI_INTR_TYPE_INTX] = 1;
   1827 
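	/*
	 * Interrupt allocation falls back in stages: if MSI-X setup
	 * fails, release the vectors and retry with MSI, and if that
	 * fails too, retry with INTx; MSI and INTx are limited to a
	 * single queue.
	 */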
   1828 alloc_retry:
   1829 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1830 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1831 		return;
   1832 	}
   1833 
   1834 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1835 		error = wm_setup_msix(sc);
   1836 		if (error) {
   1837 			pci_intr_release(pc, sc->sc_intrs,
   1838 			    counts[PCI_INTR_TYPE_MSIX]);
   1839 
   1840 			/* Setup for MSI: Disable MSI-X */
   1841 			max_type = PCI_INTR_TYPE_MSI;
   1842 			counts[PCI_INTR_TYPE_MSI] = 1;
   1843 			counts[PCI_INTR_TYPE_INTX] = 1;
   1844 			goto alloc_retry;
   1845 		}
    1846 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1847 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1848 		error = wm_setup_legacy(sc);
   1849 		if (error) {
   1850 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1851 			    counts[PCI_INTR_TYPE_MSI]);
   1852 
   1853 			/* The next try is for INTx: Disable MSI */
   1854 			max_type = PCI_INTR_TYPE_INTX;
   1855 			counts[PCI_INTR_TYPE_INTX] = 1;
   1856 			goto alloc_retry;
   1857 		}
   1858 	} else {
   1859 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1860 		error = wm_setup_legacy(sc);
   1861 		if (error) {
   1862 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1863 			    counts[PCI_INTR_TYPE_INTX]);
   1864 			return;
   1865 		}
   1866 	}
   1867 
   1868 	/*
   1869 	 * Check the function ID (unit number of the chip).
   1870 	 */
   1871 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1872 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1873 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1874 	    || (sc->sc_type == WM_T_82580)
   1875 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1876 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1877 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1878 	else
   1879 		sc->sc_funcid = 0;
   1880 
   1881 	/*
   1882 	 * Determine a few things about the bus we're connected to.
   1883 	 */
   1884 	if (sc->sc_type < WM_T_82543) {
   1885 		/* We don't really know the bus characteristics here. */
   1886 		sc->sc_bus_speed = 33;
   1887 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1888 		/*
    1889 		 * CSA (Communication Streaming Architecture) is about as
    1890 		 * fast as a 32-bit 66MHz PCI bus.
   1891 		 */
   1892 		sc->sc_flags |= WM_F_CSA;
   1893 		sc->sc_bus_speed = 66;
   1894 		aprint_verbose_dev(sc->sc_dev,
   1895 		    "Communication Streaming Architecture\n");
   1896 		if (sc->sc_type == WM_T_82547) {
   1897 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1898 			callout_setfunc(&sc->sc_txfifo_ch,
   1899 					wm_82547_txfifo_stall, sc);
   1900 			aprint_verbose_dev(sc->sc_dev,
   1901 			    "using 82547 Tx FIFO stall work-around\n");
   1902 		}
   1903 	} else if (sc->sc_type >= WM_T_82571) {
   1904 		sc->sc_flags |= WM_F_PCIE;
   1905 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1906 		    && (sc->sc_type != WM_T_ICH10)
   1907 		    && (sc->sc_type != WM_T_PCH)
   1908 		    && (sc->sc_type != WM_T_PCH2)
   1909 		    && (sc->sc_type != WM_T_PCH_LPT)
   1910 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1911 			/* ICH* and PCH* have no PCIe capability registers */
   1912 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1913 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1914 				NULL) == 0)
   1915 				aprint_error_dev(sc->sc_dev,
   1916 				    "unable to find PCIe capability\n");
   1917 		}
   1918 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1919 	} else {
   1920 		reg = CSR_READ(sc, WMREG_STATUS);
   1921 		if (reg & STATUS_BUS64)
   1922 			sc->sc_flags |= WM_F_BUS64;
   1923 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1924 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1925 
   1926 			sc->sc_flags |= WM_F_PCIX;
   1927 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1928 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1929 				aprint_error_dev(sc->sc_dev,
   1930 				    "unable to find PCIX capability\n");
   1931 			else if (sc->sc_type != WM_T_82545_3 &&
   1932 				 sc->sc_type != WM_T_82546_3) {
   1933 				/*
   1934 				 * Work around a problem caused by the BIOS
   1935 				 * setting the max memory read byte count
   1936 				 * incorrectly.
   1937 				 */
   1938 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1939 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1940 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1941 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1942 
   1943 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1944 				    PCIX_CMD_BYTECNT_SHIFT;
   1945 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1946 				    PCIX_STATUS_MAXB_SHIFT;
   1947 				if (bytecnt > maxb) {
   1948 					aprint_verbose_dev(sc->sc_dev,
   1949 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1950 					    512 << bytecnt, 512 << maxb);
   1951 					pcix_cmd = (pcix_cmd &
   1952 					    ~PCIX_CMD_BYTECNT_MASK) |
   1953 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1954 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1955 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1956 					    pcix_cmd);
   1957 				}
   1958 			}
   1959 		}
   1960 		/*
   1961 		 * The quad port adapter is special; it has a PCIX-PCIX
   1962 		 * bridge on the board, and can run the secondary bus at
   1963 		 * a higher speed.
   1964 		 */
   1965 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1966 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1967 								      : 66;
   1968 		} else if (sc->sc_flags & WM_F_PCIX) {
   1969 			switch (reg & STATUS_PCIXSPD_MASK) {
   1970 			case STATUS_PCIXSPD_50_66:
   1971 				sc->sc_bus_speed = 66;
   1972 				break;
   1973 			case STATUS_PCIXSPD_66_100:
   1974 				sc->sc_bus_speed = 100;
   1975 				break;
   1976 			case STATUS_PCIXSPD_100_133:
   1977 				sc->sc_bus_speed = 133;
   1978 				break;
   1979 			default:
   1980 				aprint_error_dev(sc->sc_dev,
   1981 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1982 				    reg & STATUS_PCIXSPD_MASK);
   1983 				sc->sc_bus_speed = 66;
   1984 				break;
   1985 			}
   1986 		} else
   1987 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1988 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1989 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1990 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1991 	}
   1992 
   1993 	/* clear interesting stat counters */
   1994 	CSR_READ(sc, WMREG_COLC);
   1995 	CSR_READ(sc, WMREG_RXERRC);
   1996 
   1997 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1998 	    || (sc->sc_type >= WM_T_ICH8))
   1999 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2000 	if (sc->sc_type >= WM_T_ICH8)
   2001 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2002 
    2003 	/* Set NVM parameters and the PHY/NVM semaphore functions */
   2004 	switch (sc->sc_type) {
   2005 	case WM_T_82542_2_0:
   2006 	case WM_T_82542_2_1:
   2007 	case WM_T_82543:
   2008 	case WM_T_82544:
   2009 		/* Microwire */
   2010 		sc->sc_nvm_wordsize = 64;
   2011 		sc->sc_nvm_addrbits = 6;
   2012 		break;
   2013 	case WM_T_82540:
   2014 	case WM_T_82545:
   2015 	case WM_T_82545_3:
   2016 	case WM_T_82546:
   2017 	case WM_T_82546_3:
   2018 		/* Microwire */
   2019 		reg = CSR_READ(sc, WMREG_EECD);
   2020 		if (reg & EECD_EE_SIZE) {
   2021 			sc->sc_nvm_wordsize = 256;
   2022 			sc->sc_nvm_addrbits = 8;
   2023 		} else {
   2024 			sc->sc_nvm_wordsize = 64;
   2025 			sc->sc_nvm_addrbits = 6;
   2026 		}
   2027 		sc->sc_flags |= WM_F_LOCK_EECD;
   2028 		break;
   2029 	case WM_T_82541:
   2030 	case WM_T_82541_2:
   2031 	case WM_T_82547:
   2032 	case WM_T_82547_2:
   2033 		sc->sc_flags |= WM_F_LOCK_EECD;
   2034 		reg = CSR_READ(sc, WMREG_EECD);
   2035 		if (reg & EECD_EE_TYPE) {
   2036 			/* SPI */
   2037 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2038 			wm_nvm_set_addrbits_size_eecd(sc);
   2039 		} else {
   2040 			/* Microwire */
   2041 			if ((reg & EECD_EE_ABITS) != 0) {
   2042 				sc->sc_nvm_wordsize = 256;
   2043 				sc->sc_nvm_addrbits = 8;
   2044 			} else {
   2045 				sc->sc_nvm_wordsize = 64;
   2046 				sc->sc_nvm_addrbits = 6;
   2047 			}
   2048 		}
   2049 		break;
   2050 	case WM_T_82571:
   2051 	case WM_T_82572:
   2052 		/* SPI */
   2053 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2054 		wm_nvm_set_addrbits_size_eecd(sc);
   2055 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2056 		sc->phy.acquire = wm_get_swsm_semaphore;
   2057 		sc->phy.release = wm_put_swsm_semaphore;
   2058 		break;
   2059 	case WM_T_82573:
   2060 	case WM_T_82574:
   2061 	case WM_T_82583:
   2062 		if (sc->sc_type == WM_T_82573) {
   2063 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2064 			sc->phy.acquire = wm_get_swsm_semaphore;
   2065 			sc->phy.release = wm_put_swsm_semaphore;
   2066 		} else {
   2067 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2068 			/* Both PHY and NVM use the same semaphore. */
   2069 			sc->phy.acquire
   2070 			    = wm_get_swfwhw_semaphore;
   2071 			sc->phy.release
   2072 			    = wm_put_swfwhw_semaphore;
   2073 		}
   2074 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2075 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2076 			sc->sc_nvm_wordsize = 2048;
   2077 		} else {
   2078 			/* SPI */
   2079 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2080 			wm_nvm_set_addrbits_size_eecd(sc);
   2081 		}
   2082 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2083 		break;
   2084 	case WM_T_82575:
   2085 	case WM_T_82576:
   2086 	case WM_T_82580:
   2087 	case WM_T_I350:
   2088 	case WM_T_I354:
   2089 	case WM_T_80003:
   2090 		/* SPI */
   2091 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2092 		wm_nvm_set_addrbits_size_eecd(sc);
   2093 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2094 		    | WM_F_LOCK_SWSM;
   2095 		sc->phy.acquire = wm_get_phy_82575;
   2096 		sc->phy.release = wm_put_phy_82575;
   2097 		break;
   2098 	case WM_T_ICH8:
   2099 	case WM_T_ICH9:
   2100 	case WM_T_ICH10:
   2101 	case WM_T_PCH:
   2102 	case WM_T_PCH2:
   2103 	case WM_T_PCH_LPT:
   2104 		/* FLASH */
   2105 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2106 		sc->sc_nvm_wordsize = 2048;
   2107 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2108 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2109 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2110 			aprint_error_dev(sc->sc_dev,
   2111 			    "can't map FLASH registers\n");
   2112 			goto out;
   2113 		}
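		/*
		 * GFPREG describes the region of the flash reserved for
		 * the NVM as base/limit sector numbers; derive the base
		 * offset in bytes and the size of one of the two NVM
		 * banks in 16-bit words.
		 */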
   2114 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2115 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2116 		    ICH_FLASH_SECTOR_SIZE;
   2117 		sc->sc_ich8_flash_bank_size =
   2118 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2119 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2120 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2121 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2122 		sc->sc_flashreg_offset = 0;
   2123 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2124 		sc->phy.release = wm_put_swflag_ich8lan;
   2125 		break;
   2126 	case WM_T_PCH_SPT:
   2127 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2128 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2129 		sc->sc_flasht = sc->sc_st;
   2130 		sc->sc_flashh = sc->sc_sh;
   2131 		sc->sc_ich8_flash_base = 0;
   2132 		sc->sc_nvm_wordsize =
   2133 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2134 			* NVM_SIZE_MULTIPLIER;
    2135 		/* That is the size in bytes; we want words */
   2136 		sc->sc_nvm_wordsize /= 2;
   2137 		/* assume 2 banks */
   2138 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2139 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2140 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2141 		sc->phy.release = wm_put_swflag_ich8lan;
   2142 		break;
   2143 	case WM_T_I210:
   2144 	case WM_T_I211:
   2145 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2146 			wm_nvm_set_addrbits_size_eecd(sc);
   2147 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2148 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2149 		} else {
   2150 			sc->sc_nvm_wordsize = INVM_SIZE;
   2151 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2152 		}
   2153 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2154 		sc->phy.acquire = wm_get_phy_82575;
   2155 		sc->phy.release = wm_put_phy_82575;
   2156 		break;
   2157 	default:
   2158 		break;
   2159 	}
   2160 
   2161 	/* Reset the chip to a known state. */
   2162 	wm_reset(sc);
   2163 
   2164 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2165 	switch (sc->sc_type) {
   2166 	case WM_T_82571:
   2167 	case WM_T_82572:
   2168 		reg = CSR_READ(sc, WMREG_SWSM2);
   2169 		if ((reg & SWSM2_LOCK) == 0) {
   2170 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2171 			force_clear_smbi = true;
   2172 		} else
   2173 			force_clear_smbi = false;
   2174 		break;
   2175 	case WM_T_82573:
   2176 	case WM_T_82574:
   2177 	case WM_T_82583:
   2178 		force_clear_smbi = true;
   2179 		break;
   2180 	default:
   2181 		force_clear_smbi = false;
   2182 		break;
   2183 	}
   2184 	if (force_clear_smbi) {
   2185 		reg = CSR_READ(sc, WMREG_SWSM);
   2186 		if ((reg & SWSM_SMBI) != 0)
   2187 			aprint_error_dev(sc->sc_dev,
   2188 			    "Please update the Bootagent\n");
   2189 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2190 	}
   2191 
   2192 	/*
    2193 	 * Defer printing the EEPROM type until after verifying the
    2194 	 * checksum.  This allows the EEPROM type to be printed correctly
    2195 	 * in the case that no EEPROM is attached.
   2196 	 */
   2197 	/*
   2198 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2199 	 * this for later, so we can fail future reads from the EEPROM.
   2200 	 */
   2201 	if (wm_nvm_validate_checksum(sc)) {
   2202 		/*
    2203 		 * Read it again, because some PCI-e parts fail the
    2204 		 * first check due to the link being in a sleep state.
   2205 		 */
   2206 		if (wm_nvm_validate_checksum(sc))
   2207 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2208 	}
   2209 
   2210 	/* Set device properties (macflags) */
   2211 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2212 
   2213 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2214 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2215 	else {
   2216 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2217 		    sc->sc_nvm_wordsize);
   2218 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2219 			aprint_verbose("iNVM");
   2220 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2221 			aprint_verbose("FLASH(HW)");
   2222 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2223 			aprint_verbose("FLASH");
   2224 		else {
   2225 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2226 				eetype = "SPI";
   2227 			else
   2228 				eetype = "MicroWire";
   2229 			aprint_verbose("(%d address bits) %s EEPROM",
   2230 			    sc->sc_nvm_addrbits, eetype);
   2231 		}
   2232 	}
   2233 	wm_nvm_version(sc);
   2234 	aprint_verbose("\n");
   2235 
   2236 	/* Check for I21[01] PLL workaround */
   2237 	if (sc->sc_type == WM_T_I210)
   2238 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2239 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2240 		/* NVM image release 3.25 has a workaround */
   2241 		if ((sc->sc_nvm_ver_major < 3)
   2242 		    || ((sc->sc_nvm_ver_major == 3)
   2243 			&& (sc->sc_nvm_ver_minor < 25))) {
   2244 			aprint_verbose_dev(sc->sc_dev,
   2245 			    "ROM image version %d.%d is older than 3.25\n",
   2246 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2247 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2248 		}
   2249 	}
   2250 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2251 		wm_pll_workaround_i210(sc);
   2252 
   2253 	wm_get_wakeup(sc);
   2254 
   2255 	/* Non-AMT based hardware can now take control from firmware */
   2256 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2257 		wm_get_hw_control(sc);
   2258 
   2259 	/*
    2260 	 * Read the Ethernet address from the EEPROM, if it isn't found
    2261 	 * first in the device properties.
   2262 	 */
   2263 	ea = prop_dictionary_get(dict, "mac-address");
   2264 	if (ea != NULL) {
   2265 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2266 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2267 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2268 	} else {
   2269 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2270 			aprint_error_dev(sc->sc_dev,
   2271 			    "unable to read Ethernet address\n");
   2272 			goto out;
   2273 		}
   2274 	}
   2275 
   2276 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2277 	    ether_sprintf(enaddr));
   2278 
   2279 	/*
   2280 	 * Read the config info from the EEPROM, and set up various
   2281 	 * bits in the control registers based on their contents.
   2282 	 */
   2283 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2284 	if (pn != NULL) {
   2285 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2286 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2287 	} else {
   2288 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2289 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2290 			goto out;
   2291 		}
   2292 	}
   2293 
   2294 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2295 	if (pn != NULL) {
   2296 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2297 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2298 	} else {
   2299 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2300 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2301 			goto out;
   2302 		}
   2303 	}
   2304 
   2305 	/* check for WM_F_WOL */
   2306 	switch (sc->sc_type) {
   2307 	case WM_T_82542_2_0:
   2308 	case WM_T_82542_2_1:
   2309 	case WM_T_82543:
   2310 		/* dummy? */
   2311 		eeprom_data = 0;
   2312 		apme_mask = NVM_CFG3_APME;
   2313 		break;
   2314 	case WM_T_82544:
   2315 		apme_mask = NVM_CFG2_82544_APM_EN;
   2316 		eeprom_data = cfg2;
   2317 		break;
   2318 	case WM_T_82546:
   2319 	case WM_T_82546_3:
   2320 	case WM_T_82571:
   2321 	case WM_T_82572:
   2322 	case WM_T_82573:
   2323 	case WM_T_82574:
   2324 	case WM_T_82583:
   2325 	case WM_T_80003:
   2326 	default:
   2327 		apme_mask = NVM_CFG3_APME;
   2328 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2329 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2330 		break;
   2331 	case WM_T_82575:
   2332 	case WM_T_82576:
   2333 	case WM_T_82580:
   2334 	case WM_T_I350:
   2335 	case WM_T_I354: /* XXX ok? */
   2336 	case WM_T_ICH8:
   2337 	case WM_T_ICH9:
   2338 	case WM_T_ICH10:
   2339 	case WM_T_PCH:
   2340 	case WM_T_PCH2:
   2341 	case WM_T_PCH_LPT:
   2342 	case WM_T_PCH_SPT:
   2343 		/* XXX The funcid should be checked on some devices */
   2344 		apme_mask = WUC_APME;
   2345 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2346 		break;
   2347 	}
   2348 
    2349 	/* Set the WM_F_WOL flag from the EEPROM/WUC data read above */
   2350 	if ((eeprom_data & apme_mask) != 0)
   2351 		sc->sc_flags |= WM_F_WOL;
   2352 #ifdef WM_DEBUG
   2353 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2354 		printf("WOL\n");
   2355 #endif
   2356 
   2357 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2358 		/* Check NVM for autonegotiation */
   2359 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2360 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2361 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2362 		}
   2363 	}
   2364 
   2365 	/*
   2366 	 * XXX need special handling for some multiple port cards
    2367 	 * to disable a particular port.
   2368 	 */
   2369 
   2370 	if (sc->sc_type >= WM_T_82544) {
   2371 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2372 		if (pn != NULL) {
   2373 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2374 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2375 		} else {
   2376 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2377 				aprint_error_dev(sc->sc_dev,
   2378 				    "unable to read SWDPIN\n");
   2379 				goto out;
   2380 			}
   2381 		}
   2382 	}
   2383 
   2384 	if (cfg1 & NVM_CFG1_ILOS)
   2385 		sc->sc_ctrl |= CTRL_ILOS;
   2386 
   2387 	/*
   2388 	 * XXX
    2389 	 * This code isn't correct, because pins 2 and 3 are located
    2390 	 * in different positions on newer chips.  Check all datasheets.
    2391 	 *
    2392 	 * Until this problem is resolved, only do this on chips < 82580.
   2393 	 */
   2394 	if (sc->sc_type <= WM_T_82580) {
   2395 		if (sc->sc_type >= WM_T_82544) {
   2396 			sc->sc_ctrl |=
   2397 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2398 			    CTRL_SWDPIO_SHIFT;
   2399 			sc->sc_ctrl |=
   2400 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2401 			    CTRL_SWDPINS_SHIFT;
   2402 		} else {
   2403 			sc->sc_ctrl |=
   2404 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2405 			    CTRL_SWDPIO_SHIFT;
   2406 		}
   2407 	}
   2408 
   2409 	/* XXX For other than 82580? */
   2410 	if (sc->sc_type == WM_T_82580) {
   2411 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2412 		if (nvmword & __BIT(13))
   2413 			sc->sc_ctrl |= CTRL_ILOS;
   2414 	}
   2415 
   2416 #if 0
   2417 	if (sc->sc_type >= WM_T_82544) {
   2418 		if (cfg1 & NVM_CFG1_IPS0)
   2419 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2420 		if (cfg1 & NVM_CFG1_IPS1)
   2421 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2422 		sc->sc_ctrl_ext |=
   2423 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2424 		    CTRL_EXT_SWDPIO_SHIFT;
   2425 		sc->sc_ctrl_ext |=
   2426 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2427 		    CTRL_EXT_SWDPINS_SHIFT;
   2428 	} else {
   2429 		sc->sc_ctrl_ext |=
   2430 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2431 		    CTRL_EXT_SWDPIO_SHIFT;
   2432 	}
   2433 #endif
   2434 
   2435 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2436 #if 0
   2437 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2438 #endif
   2439 
   2440 	if (sc->sc_type == WM_T_PCH) {
   2441 		uint16_t val;
   2442 
   2443 		/* Save the NVM K1 bit setting */
   2444 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2445 
   2446 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2447 			sc->sc_nvm_k1_enabled = 1;
   2448 		else
   2449 			sc->sc_nvm_k1_enabled = 0;
   2450 	}
   2451 
   2452 	/*
    2453 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
   2454 	 * media structures accordingly.
   2455 	 */
   2456 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2457 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2458 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2459 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2460 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2461 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2462 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2463 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2464 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2465 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2466 	    || (sc->sc_type == WM_T_I211)) {
   2467 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2468 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
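		/*
		 * The CTRL_EXT link-mode field selects the PHY
		 * attachment: internal GMII copper, SGMII, 1000BASE-KX
		 * serial or SERDES.  For SGMII(I2C)/SERDES we also ask
		 * the SFP module to refine the media type.
		 */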
   2469 		switch (link_mode) {
   2470 		case CTRL_EXT_LINK_MODE_1000KX:
   2471 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2472 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2473 			break;
   2474 		case CTRL_EXT_LINK_MODE_SGMII:
   2475 			if (wm_sgmii_uses_mdio(sc)) {
   2476 				aprint_verbose_dev(sc->sc_dev,
   2477 				    "SGMII(MDIO)\n");
   2478 				sc->sc_flags |= WM_F_SGMII;
   2479 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2480 				break;
   2481 			}
   2482 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2483 			/*FALLTHROUGH*/
   2484 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2485 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2486 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2487 				if (link_mode
   2488 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2489 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2490 					sc->sc_flags |= WM_F_SGMII;
   2491 				} else {
   2492 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2493 					aprint_verbose_dev(sc->sc_dev,
   2494 					    "SERDES\n");
   2495 				}
   2496 				break;
   2497 			}
   2498 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2499 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2500 
   2501 			/* Change current link mode setting */
   2502 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2503 			switch (sc->sc_mediatype) {
   2504 			case WM_MEDIATYPE_COPPER:
   2505 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2506 				break;
   2507 			case WM_MEDIATYPE_SERDES:
   2508 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2509 				break;
   2510 			default:
   2511 				break;
   2512 			}
   2513 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2514 			break;
   2515 		case CTRL_EXT_LINK_MODE_GMII:
   2516 		default:
   2517 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2518 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2519 			break;
   2520 		}
   2521 
    2522 		reg &= ~CTRL_EXT_I2C_ENA;
    2523 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2524 			reg |= CTRL_EXT_I2C_ENA;
   2527 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2528 
   2529 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2530 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2531 		else
   2532 			wm_tbi_mediainit(sc);
   2533 	} else if (sc->sc_type < WM_T_82543 ||
   2534 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2535 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2536 			aprint_error_dev(sc->sc_dev,
   2537 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2538 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2539 		}
   2540 		wm_tbi_mediainit(sc);
   2541 	} else {
   2542 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2543 			aprint_error_dev(sc->sc_dev,
   2544 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2545 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2546 		}
   2547 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2548 	}
   2549 
   2550 	ifp = &sc->sc_ethercom.ec_if;
   2551 	xname = device_xname(sc->sc_dev);
   2552 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2553 	ifp->if_softc = sc;
   2554 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2555 #ifdef WM_MPSAFE
   2556 	ifp->if_extflags = IFEF_START_MPSAFE;
   2557 #endif
   2558 	ifp->if_ioctl = wm_ioctl;
   2559 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2560 		ifp->if_start = wm_nq_start;
   2561 		if (sc->sc_nqueues > 1)
   2562 			ifp->if_transmit = wm_nq_transmit;
   2563 	} else {
   2564 		ifp->if_start = wm_start;
   2565 		if (sc->sc_nqueues > 1)
   2566 			ifp->if_transmit = wm_transmit;
   2567 	}
   2568 	ifp->if_watchdog = wm_watchdog;
   2569 	ifp->if_init = wm_init;
   2570 	ifp->if_stop = wm_stop;
   2571 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2572 	IFQ_SET_READY(&ifp->if_snd);
   2573 
   2574 	/* Check for jumbo frame */
   2575 	switch (sc->sc_type) {
   2576 	case WM_T_82573:
   2577 		/* XXX limited to 9234 if ASPM is disabled */
   2578 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2579 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2580 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2581 		break;
   2582 	case WM_T_82571:
   2583 	case WM_T_82572:
   2584 	case WM_T_82574:
   2585 	case WM_T_82575:
   2586 	case WM_T_82576:
   2587 	case WM_T_82580:
   2588 	case WM_T_I350:
    2589 	case WM_T_I354: /* XXX ok? */
   2590 	case WM_T_I210:
   2591 	case WM_T_I211:
   2592 	case WM_T_80003:
   2593 	case WM_T_ICH9:
   2594 	case WM_T_ICH10:
   2595 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2596 	case WM_T_PCH_LPT:
   2597 	case WM_T_PCH_SPT:
   2598 		/* XXX limited to 9234 */
   2599 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2600 		break;
   2601 	case WM_T_PCH:
   2602 		/* XXX limited to 4096 */
   2603 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2604 		break;
   2605 	case WM_T_82542_2_0:
   2606 	case WM_T_82542_2_1:
   2607 	case WM_T_82583:
   2608 	case WM_T_ICH8:
   2609 		/* No support for jumbo frame */
   2610 		break;
   2611 	default:
   2612 		/* ETHER_MAX_LEN_JUMBO */
   2613 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2614 		break;
   2615 	}
   2616 
    2617 	/* If we're an i82543 or greater, we can support VLANs. */
   2618 	if (sc->sc_type >= WM_T_82543)
   2619 		sc->sc_ethercom.ec_capabilities |=
   2620 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2621 
   2622 	/*
    2623 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2624 	 * on i82543 and later.
   2625 	 */
   2626 	if (sc->sc_type >= WM_T_82543) {
   2627 		ifp->if_capabilities |=
   2628 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2629 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2630 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2631 		    IFCAP_CSUM_TCPv6_Tx |
   2632 		    IFCAP_CSUM_UDPv6_Tx;
   2633 	}
   2634 
   2635 	/*
   2636 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2637 	 *
   2638 	 *	82541GI (8086:1076) ... no
   2639 	 *	82572EI (8086:10b9) ... yes
   2640 	 */
   2641 	if (sc->sc_type >= WM_T_82571) {
   2642 		ifp->if_capabilities |=
   2643 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2644 	}
   2645 
   2646 	/*
    2647 	 * If we're an i82544 or greater (except the i82547), we can do
   2648 	 * TCP segmentation offload.
   2649 	 */
   2650 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2651 		ifp->if_capabilities |= IFCAP_TSOv4;
   2652 	}
   2653 
   2654 	if (sc->sc_type >= WM_T_82571) {
   2655 		ifp->if_capabilities |= IFCAP_TSOv6;
   2656 	}
   2657 
   2658 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2659 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2660 
   2661 #ifdef WM_MPSAFE
   2662 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2663 #else
   2664 	sc->sc_core_lock = NULL;
   2665 #endif
   2666 
   2667 	/* Attach the interface. */
   2668 	if_initialize(ifp);
   2669 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2670 	ether_ifattach(ifp, enaddr);
   2671 	if_register(ifp);
   2672 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2673 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2674 			  RND_FLAG_DEFAULT);
   2675 
   2676 #ifdef WM_EVENT_COUNTERS
   2677 	/* Attach event counters. */
   2678 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2679 	    NULL, xname, "linkintr");
   2680 
   2681 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2682 	    NULL, xname, "tx_xoff");
   2683 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2684 	    NULL, xname, "tx_xon");
   2685 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2686 	    NULL, xname, "rx_xoff");
   2687 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2688 	    NULL, xname, "rx_xon");
   2689 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2690 	    NULL, xname, "rx_macctl");
   2691 #endif /* WM_EVENT_COUNTERS */
   2692 
   2693 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2694 		pmf_class_network_register(self, ifp);
   2695 	else
   2696 		aprint_error_dev(self, "couldn't establish power handler\n");
   2697 
   2698 	sc->sc_flags |= WM_F_ATTACHED;
   2699  out:
   2700 	return;
   2701 }
   2702 
   2703 /* The detach function (ca_detach) */
   2704 static int
   2705 wm_detach(device_t self, int flags __unused)
   2706 {
   2707 	struct wm_softc *sc = device_private(self);
   2708 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2709 	int i;
   2710 
   2711 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2712 		return 0;
   2713 
   2714 	/* Stop the interface. Callouts are stopped in it. */
   2715 	wm_stop(ifp, 1);
   2716 
   2717 	pmf_device_deregister(self);
   2718 
   2719 #ifdef WM_EVENT_COUNTERS
   2720 	evcnt_detach(&sc->sc_ev_linkintr);
   2721 
   2722 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2723 	evcnt_detach(&sc->sc_ev_tx_xon);
   2724 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2725 	evcnt_detach(&sc->sc_ev_rx_xon);
   2726 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2727 #endif /* WM_EVENT_COUNTERS */
   2728 
   2729 	/* Tell the firmware about the release */
   2730 	WM_CORE_LOCK(sc);
   2731 	wm_release_manageability(sc);
   2732 	wm_release_hw_control(sc);
   2733 	wm_enable_wakeup(sc);
   2734 	WM_CORE_UNLOCK(sc);
   2735 
   2736 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2737 
   2738 	/* Delete all remaining media. */
   2739 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2740 
   2741 	ether_ifdetach(ifp);
   2742 	if_detach(ifp);
   2743 	if_percpuq_destroy(sc->sc_ipq);
   2744 
   2745 	/* Unload RX dmamaps and free mbufs */
   2746 	for (i = 0; i < sc->sc_nqueues; i++) {
   2747 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2748 		mutex_enter(rxq->rxq_lock);
   2749 		wm_rxdrain(rxq);
   2750 		mutex_exit(rxq->rxq_lock);
   2751 	}
   2752 	/* Must unlock here */
   2753 
   2754 	/* Disestablish the interrupt handler */
   2755 	for (i = 0; i < sc->sc_nintrs; i++) {
   2756 		if (sc->sc_ihs[i] != NULL) {
   2757 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2758 			sc->sc_ihs[i] = NULL;
   2759 		}
   2760 	}
   2761 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2762 
   2763 	wm_free_txrx_queues(sc);
   2764 
   2765 	/* Unmap the registers */
   2766 	if (sc->sc_ss) {
   2767 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2768 		sc->sc_ss = 0;
   2769 	}
   2770 	if (sc->sc_ios) {
   2771 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2772 		sc->sc_ios = 0;
   2773 	}
   2774 	if (sc->sc_flashs) {
   2775 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2776 		sc->sc_flashs = 0;
   2777 	}
   2778 
   2779 	if (sc->sc_core_lock)
   2780 		mutex_obj_free(sc->sc_core_lock);
   2781 	if (sc->sc_ich_phymtx)
   2782 		mutex_obj_free(sc->sc_ich_phymtx);
   2783 	if (sc->sc_ich_nvmmtx)
   2784 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2785 
   2786 	return 0;
   2787 }
   2788 
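/*
 * Power management: on suspend, hand the hardware back to the
 * firmware and arm the wakeup logic; on resume, re-establish
 * manageability.
 */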
   2789 static bool
   2790 wm_suspend(device_t self, const pmf_qual_t *qual)
   2791 {
   2792 	struct wm_softc *sc = device_private(self);
   2793 
   2794 	wm_release_manageability(sc);
   2795 	wm_release_hw_control(sc);
   2796 	wm_enable_wakeup(sc);
   2797 
   2798 	return true;
   2799 }
   2800 
   2801 static bool
   2802 wm_resume(device_t self, const pmf_qual_t *qual)
   2803 {
   2804 	struct wm_softc *sc = device_private(self);
   2805 
   2806 	wm_init_manageability(sc);
   2807 
   2808 	return true;
   2809 }
   2810 
   2811 /*
   2812  * wm_watchdog:		[ifnet interface function]
   2813  *
   2814  *	Watchdog timer handler.
   2815  */
   2816 static void
   2817 wm_watchdog(struct ifnet *ifp)
   2818 {
   2819 	int qid;
   2820 	struct wm_softc *sc = ifp->if_softc;
   2821 
   2822 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2823 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2824 
   2825 		wm_watchdog_txq(ifp, txq);
   2826 	}
   2827 
   2828 	/* Reset the interface. */
   2829 	(void) wm_init(ifp);
   2830 
   2831 	/*
   2832 	 * Some upper layer processing (e.g. ALTQ) may still call
   2833 	 * ifp->if_start() directly.
   2834 	 */
   2835 	/* Try to get more packets going. */
   2836 	ifp->if_start(ifp);
   2837 }
   2838 
   2839 static void
   2840 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2841 {
   2842 	struct wm_softc *sc = ifp->if_softc;
   2843 
   2844 	/*
   2845 	 * Since we're using delayed interrupts, sweep up
   2846 	 * before we report an error.
   2847 	 */
   2848 	mutex_enter(txq->txq_lock);
   2849 	wm_txeof(sc, txq);
   2850 	mutex_exit(txq->txq_lock);
   2851 
   2852 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2853 #ifdef WM_DEBUG
   2854 		int i, j;
   2855 		struct wm_txsoft *txs;
   2856 #endif
   2857 		log(LOG_ERR,
   2858 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2859 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2860 		    txq->txq_next);
   2861 		ifp->if_oerrors++;
   2862 #ifdef WM_DEBUG
   2863 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   2864 		    i = WM_NEXTTXS(txq, i)) {
   2865 			txs = &txq->txq_soft[i];
   2866 			printf("txs %d tx %d -> %d\n",
   2867 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   2868 			for (j = txs->txs_firstdesc; ;
   2869 			    j = WM_NEXTTX(txq, j)) {
   2870 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2871 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2872 				printf("\t %#08x%08x\n",
   2873 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2874 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2875 				if (j == txs->txs_lastdesc)
   2876 					break;
   2877 			}
   2878 		}
   2879 #endif
   2880 	}
   2881 }
   2882 
   2883 /*
   2884  * wm_tick:
   2885  *
   2886  *	One second timer, used to check link status, sweep up
   2887  *	completed transmit jobs, etc.
   2888  */
   2889 static void
   2890 wm_tick(void *arg)
   2891 {
   2892 	struct wm_softc *sc = arg;
   2893 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2894 #ifndef WM_MPSAFE
   2895 	int s = splnet();
   2896 #endif
   2897 
   2898 	WM_CORE_LOCK(sc);
   2899 
   2900 	if (sc->sc_core_stopping)
   2901 		goto out;
   2902 
   2903 	if (sc->sc_type >= WM_T_82542_2_1) {
   2904 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2905 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2906 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2907 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2908 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2909 	}
   2910 
   2911 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2912 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2913 	    + CSR_READ(sc, WMREG_CRCERRS)
   2914 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2915 	    + CSR_READ(sc, WMREG_SYMERRC)
   2916 	    + CSR_READ(sc, WMREG_RXERRC)
   2917 	    + CSR_READ(sc, WMREG_SEC)
   2918 	    + CSR_READ(sc, WMREG_CEXTERR)
   2919 	    + CSR_READ(sc, WMREG_RLEC);
   2920 	/*
   2921 	 * WMREG_RNBC is incremented when no receive buffers are available
   2922 	 * in host memory. It is not a count of dropped packets, because
   2923 	 * the ethernet controller can still receive packets in that case
   2924 	 * as long as there is space in the PHY's FIFO.
   2925 	 *
   2926 	 * To count WMREG_RNBC events, use a dedicated EVCNT instead of
   2927 	 * if_iqdrops.
   2928 	 */
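	/*
	 * A minimal sketch of such a dedicated counter (hypothetical:
	 * sc_ev_rx_nobuf is not a member of struct wm_softc here):
	 *
	 *	WM_EVCNT_ADD(&sc->sc_ev_rx_nobuf, CSR_READ(sc, WMREG_RNBC));
	 *
	 * with the counter attached at attach time via
	 * evcnt_attach_dynamic(&sc->sc_ev_rx_nobuf, EVCNT_TYPE_MISC,
	 * NULL, xname, "rx_nobuf").
	 */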
   2929 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2930 
   2931 	if (sc->sc_flags & WM_F_HAS_MII)
   2932 		mii_tick(&sc->sc_mii);
   2933 	else if ((sc->sc_type >= WM_T_82575)
   2934 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2935 		wm_serdes_tick(sc);
   2936 	else
   2937 		wm_tbi_tick(sc);
   2938 
   2939 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2940 out:
   2941 	WM_CORE_UNLOCK(sc);
   2942 #ifndef WM_MPSAFE
   2943 	splx(s);
   2944 #endif
   2945 }
   2946 
   2947 static int
   2948 wm_ifflags_cb(struct ethercom *ec)
   2949 {
   2950 	struct ifnet *ifp = &ec->ec_if;
   2951 	struct wm_softc *sc = ifp->if_softc;
   2952 	int rc = 0;
   2953 
   2954 	WM_CORE_LOCK(sc);
   2955 
   2956 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2957 	sc->sc_if_flags = ifp->if_flags;
   2958 
   2959 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2960 		rc = ENETRESET;
   2961 		goto out;
   2962 	}
   2963 
   2964 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2965 		wm_set_filter(sc);
   2966 
   2967 	wm_set_vlan(sc);
   2968 
   2969 out:
   2970 	WM_CORE_UNLOCK(sc);
   2971 
   2972 	return rc;
   2973 }
   2974 
   2975 /*
   2976  * wm_ioctl:		[ifnet interface function]
   2977  *
   2978  *	Handle control requests from the operator.
   2979  */
   2980 static int
   2981 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2982 {
   2983 	struct wm_softc *sc = ifp->if_softc;
   2984 	struct ifreq *ifr = (struct ifreq *) data;
   2985 	struct ifaddr *ifa = (struct ifaddr *)data;
   2986 	struct sockaddr_dl *sdl;
   2987 	int s, error;
   2988 
   2989 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2990 		device_xname(sc->sc_dev), __func__));
   2991 
   2992 #ifndef WM_MPSAFE
   2993 	s = splnet();
   2994 #endif
   2995 	switch (cmd) {
   2996 	case SIOCSIFMEDIA:
   2997 	case SIOCGIFMEDIA:
   2998 		WM_CORE_LOCK(sc);
   2999 		/* Flow control requires full-duplex mode. */
   3000 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3001 		    (ifr->ifr_media & IFM_FDX) == 0)
   3002 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3003 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3004 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3005 				/* We can do both TXPAUSE and RXPAUSE. */
   3006 				ifr->ifr_media |=
   3007 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3008 			}
   3009 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3010 		}
   3011 		WM_CORE_UNLOCK(sc);
   3012 #ifdef WM_MPSAFE
   3013 		s = splnet();
   3014 #endif
   3015 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3016 #ifdef WM_MPSAFE
   3017 		splx(s);
   3018 #endif
   3019 		break;
   3020 	case SIOCINITIFADDR:
   3021 		WM_CORE_LOCK(sc);
   3022 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3023 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3024 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3025 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3026 			/* unicast address is first multicast entry */
   3027 			wm_set_filter(sc);
   3028 			error = 0;
   3029 			WM_CORE_UNLOCK(sc);
   3030 			break;
   3031 		}
   3032 		WM_CORE_UNLOCK(sc);
   3033 		/*FALLTHROUGH*/
   3034 	default:
   3035 #ifdef WM_MPSAFE
   3036 		s = splnet();
   3037 #endif
   3038 		/* It may call wm_start, so unlock here */
   3039 		error = ether_ioctl(ifp, cmd, data);
   3040 #ifdef WM_MPSAFE
   3041 		splx(s);
   3042 #endif
   3043 		if (error != ENETRESET)
   3044 			break;
   3045 
   3046 		error = 0;
   3047 
   3048 		if (cmd == SIOCSIFCAP) {
   3049 			error = (*ifp->if_init)(ifp);
   3050 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3051 			;
   3052 		else if (ifp->if_flags & IFF_RUNNING) {
   3053 			/*
   3054 			 * Multicast list has changed; set the hardware filter
   3055 			 * accordingly.
   3056 			 */
   3057 			WM_CORE_LOCK(sc);
   3058 			wm_set_filter(sc);
   3059 			WM_CORE_UNLOCK(sc);
   3060 		}
   3061 		break;
   3062 	}
   3063 
   3064 #ifndef WM_MPSAFE
   3065 	splx(s);
   3066 #endif
   3067 	return error;
   3068 }
   3069 
   3070 /* MAC address related */
   3071 
   3072 /*
   3073  * Get the offset of the MAC address and return it.
   3074  * If an error occurs, use offset 0.
   3075  */
   3076 static uint16_t
   3077 wm_check_alt_mac_addr(struct wm_softc *sc)
   3078 {
   3079 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3080 	uint16_t offset = NVM_OFF_MACADDR;
   3081 
   3082 	/* Try to read alternative MAC address pointer */
   3083 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3084 		return 0;
   3085 
   3086 	/* Check whether the pointer is valid. */
   3087 	if ((offset == 0x0000) || (offset == 0xffff))
   3088 		return 0;
   3089 
   3090 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3091 	/*
   3092 	 * Check whether the alternative MAC address is valid.
   3093 	 * Some cards have a non-0xffff pointer but don't actually use
   3094 	 * an alternative MAC address.
   3095 	 *
   3096 	 * Check that the multicast/broadcast bit of the first octet is clear.
   3097 	 */
   3098 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3099 		if (((myea[0] & 0xff) & 0x01) == 0)
   3100 			return offset; /* Found */
   3101 
   3102 	/* Not found */
   3103 	return 0;
   3104 }
   3105 
   3106 static int
   3107 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3108 {
   3109 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3110 	uint16_t offset = NVM_OFF_MACADDR;
   3111 	int do_invert = 0;
   3112 
   3113 	switch (sc->sc_type) {
   3114 	case WM_T_82580:
   3115 	case WM_T_I350:
   3116 	case WM_T_I354:
   3117 		/* EEPROM Top Level Partitioning */
   3118 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3119 		break;
   3120 	case WM_T_82571:
   3121 	case WM_T_82575:
   3122 	case WM_T_82576:
   3123 	case WM_T_80003:
   3124 	case WM_T_I210:
   3125 	case WM_T_I211:
   3126 		offset = wm_check_alt_mac_addr(sc);
   3127 		if (offset == 0)
   3128 			if ((sc->sc_funcid & 0x01) == 1)
   3129 				do_invert = 1;
   3130 		break;
   3131 	default:
   3132 		if ((sc->sc_funcid & 0x01) == 1)
   3133 			do_invert = 1;
   3134 		break;
   3135 	}
   3136 
   3137 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3138 		goto bad;
   3139 
   3140 	enaddr[0] = myea[0] & 0xff;
   3141 	enaddr[1] = myea[0] >> 8;
   3142 	enaddr[2] = myea[1] & 0xff;
   3143 	enaddr[3] = myea[1] >> 8;
   3144 	enaddr[4] = myea[2] & 0xff;
   3145 	enaddr[5] = myea[2] >> 8;
   3146 
   3147 	/*
   3148 	 * Toggle the LSB of the MAC address on the second port
   3149 	 * of some dual port cards.
   3150 	 */
   3151 	if (do_invert != 0)
   3152 		enaddr[5] ^= 1;
   3153 
   3154 	return 0;
   3155 
   3156  bad:
   3157 	return -1;
   3158 }
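
/*
 * Unpacking example (illustrative): NVM words are little-endian, so
 * myea[] = { 0x1100, 0x3322, 0x5544 } yields the station address
 * 00:11:22:33:44:55 (before any LSB toggle for the second port).
 */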
   3159 
   3160 /*
   3161  * wm_set_ral:
   3162  *
   3163  *	Set an entry in the receive address list.
   3164  */
   3165 static void
   3166 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3167 {
   3168 	uint32_t ral_lo, ral_hi;
   3169 
   3170 	if (enaddr != NULL) {
   3171 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3172 		    (enaddr[3] << 24);
   3173 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3174 		ral_hi |= RAL_AV;
   3175 	} else {
   3176 		ral_lo = 0;
   3177 		ral_hi = 0;
   3178 	}
   3179 
   3180 	if (sc->sc_type >= WM_T_82544) {
   3181 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3182 		    ral_lo);
   3183 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3184 		    ral_hi);
   3185 	} else {
   3186 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3187 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3188 	}
   3189 }
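
/*
 * Packing example (illustrative): for enaddr 00:11:22:33:44:55,
 * ral_lo becomes 0x33221100 and ral_hi becomes 0x5544 | RAL_AV,
 * i.e. the address is stored byte-reversed across the LO/HI pair
 * with the "address valid" bit set in the high word.
 */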
   3190 
   3191 /*
   3192  * wm_mchash:
   3193  *
   3194  *	Compute the hash of the multicast address for the 4096-bit
   3195  *	multicast filter.
   3196  */
   3197 static uint32_t
   3198 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3199 {
   3200 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3201 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3202 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3203 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3204 	uint32_t hash;
   3205 
   3206 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3207 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3208 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3209 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3210 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3211 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3212 		return (hash & 0x3ff);
   3213 	}
   3214 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3215 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3216 
   3217 	return (hash & 0xfff);
   3218 }
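
/*
 * Worked example (illustrative): on a non-ICH/PCH chip with
 * sc_mchash_type == 0, the address 01:00:5e:00:00:01 gives
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010, so wm_set_filter()
 * below sets bit 16 (0x010 & 0x1f) of MTA word 0 (0x010 >> 5).
 */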
   3219 
   3220 /*
   3221  * wm_set_filter:
   3222  *
   3223  *	Set up the receive filter.
   3224  */
   3225 static void
   3226 wm_set_filter(struct wm_softc *sc)
   3227 {
   3228 	struct ethercom *ec = &sc->sc_ethercom;
   3229 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3230 	struct ether_multi *enm;
   3231 	struct ether_multistep step;
   3232 	bus_addr_t mta_reg;
   3233 	uint32_t hash, reg, bit;
   3234 	int i, size, ralmax;
   3235 
   3236 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3237 		device_xname(sc->sc_dev), __func__));
   3238 
   3239 	if (sc->sc_type >= WM_T_82544)
   3240 		mta_reg = WMREG_CORDOVA_MTA;
   3241 	else
   3242 		mta_reg = WMREG_MTA;
   3243 
   3244 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3245 
   3246 	if (ifp->if_flags & IFF_BROADCAST)
   3247 		sc->sc_rctl |= RCTL_BAM;
   3248 	if (ifp->if_flags & IFF_PROMISC) {
   3249 		sc->sc_rctl |= RCTL_UPE;
   3250 		goto allmulti;
   3251 	}
   3252 
   3253 	/*
   3254 	 * Set the station address in the first RAL slot, and
   3255 	 * clear the remaining slots.
   3256 	 */
   3257 	if (sc->sc_type == WM_T_ICH8)
   3258 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3259 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3260 	    || (sc->sc_type == WM_T_PCH))
   3261 		size = WM_RAL_TABSIZE_ICH8;
   3262 	else if (sc->sc_type == WM_T_PCH2)
   3263 		size = WM_RAL_TABSIZE_PCH2;
   3264 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3265 		size = WM_RAL_TABSIZE_PCH_LPT;
   3266 	else if (sc->sc_type == WM_T_82575)
   3267 		size = WM_RAL_TABSIZE_82575;
   3268 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3269 		size = WM_RAL_TABSIZE_82576;
   3270 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3271 		size = WM_RAL_TABSIZE_I350;
   3272 	else
   3273 		size = WM_RAL_TABSIZE;
   3274 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3275 
   3276 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3277 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3278 		switch (i) {
   3279 		case 0:
   3280 			/* We can use all entries */
   3281 			ralmax = size;
   3282 			break;
   3283 		case 1:
   3284 			/* Only RAR[0] */
   3285 			ralmax = 1;
   3286 			break;
   3287 		default:
   3288 			/* available SHRA + RAR[0] */
   3289 			ralmax = i + 1;
   3290 		}
   3291 	} else
   3292 		ralmax = size;
   3293 	for (i = 1; i < size; i++) {
   3294 		if (i < ralmax)
   3295 			wm_set_ral(sc, NULL, i);
   3296 	}
   3297 
   3298 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3299 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3300 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3301 	    || (sc->sc_type == WM_T_PCH_SPT))
   3302 		size = WM_ICH8_MC_TABSIZE;
   3303 	else
   3304 		size = WM_MC_TABSIZE;
   3305 	/* Clear out the multicast table. */
   3306 	for (i = 0; i < size; i++)
   3307 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3308 
   3309 	ETHER_LOCK(ec);
   3310 	ETHER_FIRST_MULTI(step, ec, enm);
   3311 	while (enm != NULL) {
   3312 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3313 			ETHER_UNLOCK(ec);
   3314 			/*
   3315 			 * We must listen to a range of multicast addresses.
   3316 			 * For now, just accept all multicasts, rather than
   3317 			 * trying to set only those filter bits needed to match
   3318 			 * the range.  (At this time, the only use of address
   3319 			 * ranges is for IP multicast routing, for which the
   3320 			 * range is big enough to require all bits set.)
   3321 			 */
   3322 			goto allmulti;
   3323 		}
   3324 
   3325 		hash = wm_mchash(sc, enm->enm_addrlo);
   3326 
   3327 		reg = (hash >> 5);
   3328 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3329 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3330 		    || (sc->sc_type == WM_T_PCH2)
   3331 		    || (sc->sc_type == WM_T_PCH_LPT)
   3332 		    || (sc->sc_type == WM_T_PCH_SPT))
   3333 			reg &= 0x1f;
   3334 		else
   3335 			reg &= 0x7f;
   3336 		bit = hash & 0x1f;
   3337 
   3338 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3339 		hash |= 1U << bit;
   3340 
   3341 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3342 			/*
   3343 			 * 82544 Errata 9: Certain registers cannot be written
   3344 			 * with particular alignments in PCI-X bus operation
   3345 			 * (FCAH, MTA and VFTA).
   3346 			 */
   3347 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3348 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3349 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3350 		} else
   3351 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3352 
   3353 		ETHER_NEXT_MULTI(step, enm);
   3354 	}
   3355 	ETHER_UNLOCK(ec);
   3356 
   3357 	ifp->if_flags &= ~IFF_ALLMULTI;
   3358 	goto setit;
   3359 
   3360  allmulti:
   3361 	ifp->if_flags |= IFF_ALLMULTI;
   3362 	sc->sc_rctl |= RCTL_MPE;
   3363 
   3364  setit:
   3365 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3366 }
   3367 
   3368 /* Reset and init related */
   3369 
   3370 static void
   3371 wm_set_vlan(struct wm_softc *sc)
   3372 {
   3373 
   3374 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3375 		device_xname(sc->sc_dev), __func__));
   3376 
   3377 	/* Deal with VLAN enables. */
   3378 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3379 		sc->sc_ctrl |= CTRL_VME;
   3380 	else
   3381 		sc->sc_ctrl &= ~CTRL_VME;
   3382 
   3383 	/* Write the control registers. */
   3384 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3385 }
   3386 
   3387 static void
   3388 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3389 {
   3390 	uint32_t gcr;
   3391 	pcireg_t ctrl2;
   3392 
   3393 	gcr = CSR_READ(sc, WMREG_GCR);
   3394 
   3395 	/* Only take action if timeout value is defaulted to 0 */
   3396 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3397 		goto out;
   3398 
   3399 	if ((gcr & GCR_CAP_VER2) == 0) {
   3400 		gcr |= GCR_CMPL_TMOUT_10MS;
   3401 		goto out;
   3402 	}
   3403 
   3404 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3405 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3406 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3407 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3408 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3409 
   3410 out:
   3411 	/* Disable completion timeout resend */
   3412 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3413 
   3414 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3415 }
   3416 
   3417 void
   3418 wm_get_auto_rd_done(struct wm_softc *sc)
   3419 {
   3420 	int i;
   3421 
   3422 	/* wait for eeprom to reload */
   3423 	switch (sc->sc_type) {
   3424 	case WM_T_82571:
   3425 	case WM_T_82572:
   3426 	case WM_T_82573:
   3427 	case WM_T_82574:
   3428 	case WM_T_82583:
   3429 	case WM_T_82575:
   3430 	case WM_T_82576:
   3431 	case WM_T_82580:
   3432 	case WM_T_I350:
   3433 	case WM_T_I354:
   3434 	case WM_T_I210:
   3435 	case WM_T_I211:
   3436 	case WM_T_80003:
   3437 	case WM_T_ICH8:
   3438 	case WM_T_ICH9:
   3439 		for (i = 0; i < 10; i++) {
   3440 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3441 				break;
   3442 			delay(1000);
   3443 		}
   3444 		if (i == 10) {
   3445 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3446 			    "complete\n", device_xname(sc->sc_dev));
   3447 		}
   3448 		break;
   3449 	default:
   3450 		break;
   3451 	}
   3452 }
   3453 
   3454 void
   3455 wm_lan_init_done(struct wm_softc *sc)
   3456 {
   3457 	uint32_t reg = 0;
   3458 	int i;
   3459 
   3460 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3461 		device_xname(sc->sc_dev), __func__));
   3462 
   3463 	/* Wait for eeprom to reload */
   3464 	switch (sc->sc_type) {
   3465 	case WM_T_ICH10:
   3466 	case WM_T_PCH:
   3467 	case WM_T_PCH2:
   3468 	case WM_T_PCH_LPT:
   3469 	case WM_T_PCH_SPT:
   3470 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3471 			reg = CSR_READ(sc, WMREG_STATUS);
   3472 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3473 				break;
   3474 			delay(100);
   3475 		}
   3476 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3477 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3478 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3479 		}
   3480 		break;
   3481 	default:
   3482 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3483 		    __func__);
   3484 		break;
   3485 	}
   3486 
   3487 	reg &= ~STATUS_LAN_INIT_DONE;
   3488 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3489 }
   3490 
   3491 void
   3492 wm_get_cfg_done(struct wm_softc *sc)
   3493 {
   3494 	int mask;
   3495 	uint32_t reg;
   3496 	int i;
   3497 
   3498 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3499 		device_xname(sc->sc_dev), __func__));
   3500 
   3501 	/* Wait for eeprom to reload */
   3502 	switch (sc->sc_type) {
   3503 	case WM_T_82542_2_0:
   3504 	case WM_T_82542_2_1:
   3505 		/* null */
   3506 		break;
   3507 	case WM_T_82543:
   3508 	case WM_T_82544:
   3509 	case WM_T_82540:
   3510 	case WM_T_82545:
   3511 	case WM_T_82545_3:
   3512 	case WM_T_82546:
   3513 	case WM_T_82546_3:
   3514 	case WM_T_82541:
   3515 	case WM_T_82541_2:
   3516 	case WM_T_82547:
   3517 	case WM_T_82547_2:
   3518 	case WM_T_82573:
   3519 	case WM_T_82574:
   3520 	case WM_T_82583:
   3521 		/* generic */
   3522 		delay(10*1000);
   3523 		break;
   3524 	case WM_T_80003:
   3525 	case WM_T_82571:
   3526 	case WM_T_82572:
   3527 	case WM_T_82575:
   3528 	case WM_T_82576:
   3529 	case WM_T_82580:
   3530 	case WM_T_I350:
   3531 	case WM_T_I354:
   3532 	case WM_T_I210:
   3533 	case WM_T_I211:
   3534 		if (sc->sc_type == WM_T_82571) {
   3535 			/* Only 82571 shares port 0 */
   3536 			mask = EEMNGCTL_CFGDONE_0;
   3537 		} else
   3538 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3539 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3540 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3541 				break;
   3542 			delay(1000);
   3543 		}
   3544 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3545 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3546 				device_xname(sc->sc_dev), __func__));
   3547 		}
   3548 		break;
   3549 	case WM_T_ICH8:
   3550 	case WM_T_ICH9:
   3551 	case WM_T_ICH10:
   3552 	case WM_T_PCH:
   3553 	case WM_T_PCH2:
   3554 	case WM_T_PCH_LPT:
   3555 	case WM_T_PCH_SPT:
   3556 		delay(10*1000);
   3557 		if (sc->sc_type >= WM_T_ICH10)
   3558 			wm_lan_init_done(sc);
   3559 		else
   3560 			wm_get_auto_rd_done(sc);
   3561 
   3562 		reg = CSR_READ(sc, WMREG_STATUS);
   3563 		if ((reg & STATUS_PHYRA) != 0)
   3564 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3565 		break;
   3566 	default:
   3567 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3568 		    __func__);
   3569 		break;
   3570 	}
   3571 }
   3572 
   3573 /* Init hardware bits */
   3574 void
   3575 wm_initialize_hardware_bits(struct wm_softc *sc)
   3576 {
   3577 	uint32_t tarc0, tarc1, reg;
   3578 
   3579 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3580 		device_xname(sc->sc_dev), __func__));
   3581 
   3582 	/* For 82571 variant, 80003 and ICHs */
   3583 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3584 	    || (sc->sc_type >= WM_T_80003)) {
   3585 
   3586 		/* Transmit Descriptor Control 0 */
   3587 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3588 		reg |= TXDCTL_COUNT_DESC;
   3589 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3590 
   3591 		/* Transmit Descriptor Control 1 */
   3592 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3593 		reg |= TXDCTL_COUNT_DESC;
   3594 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3595 
   3596 		/* TARC0 */
   3597 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3598 		switch (sc->sc_type) {
   3599 		case WM_T_82571:
   3600 		case WM_T_82572:
   3601 		case WM_T_82573:
   3602 		case WM_T_82574:
   3603 		case WM_T_82583:
   3604 		case WM_T_80003:
   3605 			/* Clear bits 30..27 */
   3606 			tarc0 &= ~__BITS(30, 27);
   3607 			break;
   3608 		default:
   3609 			break;
   3610 		}
   3611 
   3612 		switch (sc->sc_type) {
   3613 		case WM_T_82571:
   3614 		case WM_T_82572:
   3615 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3616 
   3617 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3618 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3619 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3620 			/* 8257[12] Errata No.7 */
   3621 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3622 
   3623 			/* TARC1 bit 28 */
   3624 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3625 				tarc1 &= ~__BIT(28);
   3626 			else
   3627 				tarc1 |= __BIT(28);
   3628 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3629 
   3630 			/*
   3631 			 * 8257[12] Errata No.13
   3632 			 * Disable Dynamic Clock Gating.
   3633 			 */
   3634 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3635 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3636 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3637 			break;
   3638 		case WM_T_82573:
   3639 		case WM_T_82574:
   3640 		case WM_T_82583:
   3641 			if ((sc->sc_type == WM_T_82574)
   3642 			    || (sc->sc_type == WM_T_82583))
   3643 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3644 
   3645 			/* Extended Device Control */
   3646 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3647 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3648 			reg |= __BIT(22);	/* Set bit 22 */
   3649 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3650 
   3651 			/* Device Control */
   3652 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3653 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3654 
   3655 			/* PCIe Control Register */
   3656 			/*
   3657 			 * 82573 Errata (unknown).
   3658 			 *
   3659 			 * 82574 Errata 25 and 82583 Errata 12
   3660 			 * "Dropped Rx Packets":
   3661 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   3662 			 */
   3663 			reg = CSR_READ(sc, WMREG_GCR);
   3664 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3665 			CSR_WRITE(sc, WMREG_GCR, reg);
   3666 
   3667 			if ((sc->sc_type == WM_T_82574)
   3668 			    || (sc->sc_type == WM_T_82583)) {
   3669 				/*
   3670 				 * Document says this bit must be set for
   3671 				 * proper operation.
   3672 				 */
   3673 				reg = CSR_READ(sc, WMREG_GCR);
   3674 				reg |= __BIT(22);
   3675 				CSR_WRITE(sc, WMREG_GCR, reg);
   3676 
   3677 				/*
   3678 				 * Apply a workaround for the hardware errata
   3679 				 * documented in the errata sheets. This fixes
   3680 				 * an issue where some error prone or
   3681 				 * unreliable PCIe completions occur,
   3682 				 * particularly with ASPM enabled. Without
   3683 				 * the fix, the issue can cause Tx timeouts.
   3684 				 */
   3685 				reg = CSR_READ(sc, WMREG_GCR2);
   3686 				reg |= __BIT(0);
   3687 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3688 			}
   3689 			break;
   3690 		case WM_T_80003:
   3691 			/* TARC0 */
   3692 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3693 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3694 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3695 
   3696 			/* TARC1 bit 28 */
   3697 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3698 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3699 				tarc1 &= ~__BIT(28);
   3700 			else
   3701 				tarc1 |= __BIT(28);
   3702 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3703 			break;
   3704 		case WM_T_ICH8:
   3705 		case WM_T_ICH9:
   3706 		case WM_T_ICH10:
   3707 		case WM_T_PCH:
   3708 		case WM_T_PCH2:
   3709 		case WM_T_PCH_LPT:
   3710 		case WM_T_PCH_SPT:
   3711 			/* TARC0 */
   3712 			if ((sc->sc_type == WM_T_ICH8)
   3713 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3714 				/* Set TARC0 bits 29 and 28 */
   3715 				tarc0 |= __BITS(29, 28);
   3716 			}
   3717 			/* Set TARC0 bits 23,24,26,27 */
   3718 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3719 
   3720 			/* CTRL_EXT */
   3721 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3722 			reg |= __BIT(22);	/* Set bit 22 */
   3723 			/*
   3724 			 * Enable PHY low-power state when MAC is at D3
   3725 			 * w/o WoL
   3726 			 */
   3727 			if (sc->sc_type >= WM_T_PCH)
   3728 				reg |= CTRL_EXT_PHYPDEN;
   3729 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3730 
   3731 			/* TARC1 */
   3732 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3733 			/* bit 28 */
   3734 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3735 				tarc1 &= ~__BIT(28);
   3736 			else
   3737 				tarc1 |= __BIT(28);
   3738 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3739 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3740 
   3741 			/* Device Status */
   3742 			if (sc->sc_type == WM_T_ICH8) {
   3743 				reg = CSR_READ(sc, WMREG_STATUS);
   3744 				reg &= ~__BIT(31);
   3745 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3746 
   3747 			}
   3748 
   3749 			/* IOSFPC */
   3750 			if (sc->sc_type == WM_T_PCH_SPT) {
   3751 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3752 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3753 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3754 			}
   3755 			/*
   3756 			 * To work around a descriptor data corruption issue
   3757 			 * with NFS v2 UDP traffic, just disable the NFS
   3758 			 * filtering capability.
   3759 			 */
   3760 			reg = CSR_READ(sc, WMREG_RFCTL);
   3761 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3762 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3763 			break;
   3764 		default:
   3765 			break;
   3766 		}
   3767 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3768 
   3769 		switch (sc->sc_type) {
   3770 		/*
   3771 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3772 		 * Avoid RSS Hash Value bug.
   3773 		 */
   3774 		case WM_T_82571:
   3775 		case WM_T_82572:
   3776 		case WM_T_82573:
   3777 		case WM_T_80003:
   3778 		case WM_T_ICH8:
   3779 			reg = CSR_READ(sc, WMREG_RFCTL);
   3780 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3781 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3782 			break;
   3783 		case WM_T_82574:
   3784 			/* Use extended Rx descriptors. */
   3785 			reg = CSR_READ(sc, WMREG_RFCTL);
   3786 			reg |= WMREG_RFCTL_EXSTEN;
   3787 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3788 			break;
   3789 		default:
   3790 			break;
   3791 		}
   3792 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3793 		/*
   3794 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3795 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3796 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3797 		 * Correctly by the Device"
   3798 		 *
   3799 		 * I354(C2000) Errata AVR53:
   3800 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3801 		 * Hang"
   3802 		 */
   3803 		reg = CSR_READ(sc, WMREG_RFCTL);
   3804 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3805 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3806 	}
   3807 }
   3808 
   3809 static uint32_t
   3810 wm_rxpbs_adjust_82580(uint32_t val)
   3811 {
   3812 	uint32_t rv = 0;
   3813 
   3814 	if (val < __arraycount(wm_82580_rxpbs_table))
   3815 		rv = wm_82580_rxpbs_table[val];
   3816 
   3817 	return rv;
   3818 }
   3819 
   3820 /*
   3821  * wm_reset_phy:
   3822  *
   3823  *	generic PHY reset function.
   3824  *	Same as e1000_phy_hw_reset_generic()
   3825  */
   3826 static void
   3827 wm_reset_phy(struct wm_softc *sc)
   3828 {
   3829 	uint32_t reg;
   3830 
   3831 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3832 		device_xname(sc->sc_dev), __func__));
   3833 	if (wm_phy_resetisblocked(sc))
   3834 		return;
   3835 
   3836 	sc->phy.acquire(sc);
   3837 
   3838 	reg = CSR_READ(sc, WMREG_CTRL);
   3839 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3840 	CSR_WRITE_FLUSH(sc);
   3841 
   3842 	delay(sc->phy.reset_delay_us);
   3843 
   3844 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3845 	CSR_WRITE_FLUSH(sc);
   3846 
   3847 	delay(150);
   3848 
   3849 	sc->phy.release(sc);
   3850 
   3851 	wm_get_cfg_done(sc);
   3852 }
   3853 
   3854 static void
   3855 wm_flush_desc_rings(struct wm_softc *sc)
   3856 {
   3857 	pcireg_t preg;
   3858 	uint32_t reg;
   3859 	int nexttx;
   3860 
   3861 	/* First, disable MULR fix in FEXTNVM11 */
   3862 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3863 	reg |= FEXTNVM11_DIS_MULRFIX;
   3864 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3865 
   3866 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3867 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3868 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3869 		struct wm_txqueue *txq;
   3870 		wiseman_txdesc_t *txd;
   3871 
   3872 		/* TX */
   3873 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3874 		    device_xname(sc->sc_dev), preg, reg);
   3875 		reg = CSR_READ(sc, WMREG_TCTL);
   3876 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3877 
   3878 		txq = &sc->sc_queue[0].wmq_txq;
   3879 		nexttx = txq->txq_next;
   3880 		txd = &txq->txq_descs[nexttx];
   3881 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   3882 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3883 		txd->wtx_fields.wtxu_status = 0;
   3884 		txd->wtx_fields.wtxu_options = 0;
   3885 		txd->wtx_fields.wtxu_vlan = 0;
   3886 
   3887 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3888 			BUS_SPACE_BARRIER_WRITE);
   3889 
   3890 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3891 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3892 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3893 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3894 		delay(250);
   3895 	}
   3896 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3897 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3898 		uint32_t rctl;
   3899 
   3900 		/* RX */
   3901 		printf("%s: Need RX flush (reg = %08x)\n",
   3902 		    device_xname(sc->sc_dev), preg);
   3903 		rctl = CSR_READ(sc, WMREG_RCTL);
   3904 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3905 		CSR_WRITE_FLUSH(sc);
   3906 		delay(150);
   3907 
   3908 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3909 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3910 		reg &= 0xffffc000;
   3911 		/*
   3912 		 * update thresholds: prefetch threshold to 31, host threshold
   3913 		 * to 1 and make sure the granularity is "descriptors" and not
   3914 		 * "cache lines"
   3915 		 */
   3916 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3917 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3918 
   3919 		/*
   3920 		 * momentarily enable the RX ring for the changes to take
   3921 		 * effect
   3922 		 */
   3923 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3924 		CSR_WRITE_FLUSH(sc);
   3925 		delay(150);
   3926 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3927 	}
   3928 }
   3929 
   3930 /*
   3931  * wm_reset:
   3932  *
   3933  *	Reset the i82542 chip.
   3934  */
   3935 static void
   3936 wm_reset(struct wm_softc *sc)
   3937 {
   3938 	int phy_reset = 0;
   3939 	int i, error = 0;
   3940 	uint32_t reg;
   3941 
   3942 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3943 		device_xname(sc->sc_dev), __func__));
   3944 	KASSERT(sc->sc_type != 0);
   3945 
   3946 	/*
   3947 	 * Allocate on-chip memory according to the MTU size.
   3948 	 * The Packet Buffer Allocation register must be written
   3949 	 * before the chip is reset.
   3950 	 */
   3951 	switch (sc->sc_type) {
   3952 	case WM_T_82547:
   3953 	case WM_T_82547_2:
   3954 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3955 		    PBA_22K : PBA_30K;
   3956 		for (i = 0; i < sc->sc_nqueues; i++) {
   3957 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3958 			txq->txq_fifo_head = 0;
   3959 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3960 			txq->txq_fifo_size =
   3961 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3962 			txq->txq_fifo_stall = 0;
   3963 		}
   3964 		break;
   3965 	case WM_T_82571:
   3966 	case WM_T_82572:
   3967 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3968 	case WM_T_80003:
   3969 		sc->sc_pba = PBA_32K;
   3970 		break;
   3971 	case WM_T_82573:
   3972 		sc->sc_pba = PBA_12K;
   3973 		break;
   3974 	case WM_T_82574:
   3975 	case WM_T_82583:
   3976 		sc->sc_pba = PBA_20K;
   3977 		break;
   3978 	case WM_T_82576:
   3979 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3980 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3981 		break;
   3982 	case WM_T_82580:
   3983 	case WM_T_I350:
   3984 	case WM_T_I354:
   3985 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3986 		break;
   3987 	case WM_T_I210:
   3988 	case WM_T_I211:
   3989 		sc->sc_pba = PBA_34K;
   3990 		break;
   3991 	case WM_T_ICH8:
   3992 		/* Workaround for a bit corruption issue in FIFO memory */
   3993 		sc->sc_pba = PBA_8K;
   3994 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3995 		break;
   3996 	case WM_T_ICH9:
   3997 	case WM_T_ICH10:
   3998 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3999 		    PBA_14K : PBA_10K;
   4000 		break;
   4001 	case WM_T_PCH:
   4002 	case WM_T_PCH2:
   4003 	case WM_T_PCH_LPT:
   4004 	case WM_T_PCH_SPT:
   4005 		sc->sc_pba = PBA_26K;
   4006 		break;
   4007 	default:
   4008 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4009 		    PBA_40K : PBA_48K;
   4010 		break;
   4011 	}
   4012 	/*
   4013 	 * Only old or non-multiqueue devices have the PBA register
   4014 	 * XXX Need special handling for 82575.
   4015 	 */
   4016 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4017 	    || (sc->sc_type == WM_T_82575))
   4018 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4019 
   4020 	/* Prevent the PCI-E bus from sticking */
   4021 	if (sc->sc_flags & WM_F_PCIE) {
   4022 		int timeout = 800;
   4023 
   4024 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4025 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4026 
   4027 		while (timeout--) {
   4028 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4029 			    == 0)
   4030 				break;
   4031 			delay(100);
   4032 		}
   4033 	}
   4034 
   4035 	/* Set the completion timeout for interface */
   4036 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4037 	    || (sc->sc_type == WM_T_82580)
   4038 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4039 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4040 		wm_set_pcie_completion_timeout(sc);
   4041 
   4042 	/* Clear interrupt */
   4043 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4044 	if (sc->sc_nintrs > 1) {
   4045 		if (sc->sc_type != WM_T_82574) {
   4046 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4047 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4048 		} else {
   4049 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4050 		}
   4051 	}
   4052 
   4053 	/* Stop the transmit and receive processes. */
   4054 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4055 	sc->sc_rctl &= ~RCTL_EN;
   4056 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4057 	CSR_WRITE_FLUSH(sc);
   4058 
   4059 	/* XXX set_tbi_sbp_82543() */
   4060 
   4061 	delay(10*1000);
   4062 
   4063 	/* Must acquire the MDIO ownership before MAC reset */
   4064 	switch (sc->sc_type) {
   4065 	case WM_T_82573:
   4066 	case WM_T_82574:
   4067 	case WM_T_82583:
   4068 		error = wm_get_hw_semaphore_82573(sc);
   4069 		break;
   4070 	default:
   4071 		break;
   4072 	}
   4073 
   4074 	/*
   4075 	 * 82541 Errata 29? & 82547 Errata 28?
   4076 	 * See also the description about PHY_RST bit in CTRL register
   4077 	 * in 8254x_GBe_SDM.pdf.
   4078 	 */
   4079 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4080 		CSR_WRITE(sc, WMREG_CTRL,
   4081 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4082 		CSR_WRITE_FLUSH(sc);
   4083 		delay(5000);
   4084 	}
   4085 
   4086 	switch (sc->sc_type) {
   4087 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4088 	case WM_T_82541:
   4089 	case WM_T_82541_2:
   4090 	case WM_T_82547:
   4091 	case WM_T_82547_2:
   4092 		/*
   4093 		 * On some chipsets, a reset through a memory-mapped write
   4094 		 * cycle can cause the chip to reset before completing the
   4095 		 * write cycle.  This causes major headache that can be
   4096 		 * avoided by issuing the reset via indirect register writes
   4097 		 * through I/O space.
   4098 		 *
   4099 		 * So, if we successfully mapped the I/O BAR at attach time,
   4100 		 * use that.  Otherwise, try our luck with a memory-mapped
   4101 		 * reset.
   4102 		 */
   4103 		if (sc->sc_flags & WM_F_IOH_VALID)
   4104 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4105 		else
   4106 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4107 		break;
   4108 	case WM_T_82545_3:
   4109 	case WM_T_82546_3:
   4110 		/* Use the shadow control register on these chips. */
   4111 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4112 		break;
   4113 	case WM_T_80003:
   4114 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4115 		sc->phy.acquire(sc);
   4116 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4117 		sc->phy.release(sc);
   4118 		break;
   4119 	case WM_T_ICH8:
   4120 	case WM_T_ICH9:
   4121 	case WM_T_ICH10:
   4122 	case WM_T_PCH:
   4123 	case WM_T_PCH2:
   4124 	case WM_T_PCH_LPT:
   4125 	case WM_T_PCH_SPT:
   4126 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4127 		if (wm_phy_resetisblocked(sc) == false) {
   4128 			/*
   4129 			 * Gate automatic PHY configuration by hardware on
   4130 			 * non-managed 82579
   4131 			 */
   4132 			if ((sc->sc_type == WM_T_PCH2)
   4133 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4134 				== 0))
   4135 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4136 
   4137 			reg |= CTRL_PHY_RESET;
   4138 			phy_reset = 1;
   4139 		} else
   4140 			printf("XXX reset is blocked!!!\n");
   4141 		sc->phy.acquire(sc);
   4142 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4143 		/* Don't insert a completion barrier during reset */
   4144 		delay(20*1000);
   4145 		mutex_exit(sc->sc_ich_phymtx);
   4146 		break;
   4147 	case WM_T_82580:
   4148 	case WM_T_I350:
   4149 	case WM_T_I354:
   4150 	case WM_T_I210:
   4151 	case WM_T_I211:
   4152 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4153 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4154 			CSR_WRITE_FLUSH(sc);
   4155 		delay(5000);
   4156 		break;
   4157 	case WM_T_82542_2_0:
   4158 	case WM_T_82542_2_1:
   4159 	case WM_T_82543:
   4160 	case WM_T_82540:
   4161 	case WM_T_82545:
   4162 	case WM_T_82546:
   4163 	case WM_T_82571:
   4164 	case WM_T_82572:
   4165 	case WM_T_82573:
   4166 	case WM_T_82574:
   4167 	case WM_T_82575:
   4168 	case WM_T_82576:
   4169 	case WM_T_82583:
   4170 	default:
   4171 		/* Everything else can safely use the documented method. */
   4172 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4173 		break;
   4174 	}
   4175 
   4176 	/* Must release the MDIO ownership after MAC reset */
   4177 	switch (sc->sc_type) {
   4178 	case WM_T_82573:
   4179 	case WM_T_82574:
   4180 	case WM_T_82583:
   4181 		if (error == 0)
   4182 			wm_put_hw_semaphore_82573(sc);
   4183 		break;
   4184 	default:
   4185 		break;
   4186 	}
   4187 
   4188 	if (phy_reset != 0)
   4189 		wm_get_cfg_done(sc);
   4190 
   4191 	/* reload EEPROM */
   4192 	switch (sc->sc_type) {
   4193 	case WM_T_82542_2_0:
   4194 	case WM_T_82542_2_1:
   4195 	case WM_T_82543:
   4196 	case WM_T_82544:
   4197 		delay(10);
   4198 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4199 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4200 		CSR_WRITE_FLUSH(sc);
   4201 		delay(2000);
   4202 		break;
   4203 	case WM_T_82540:
   4204 	case WM_T_82545:
   4205 	case WM_T_82545_3:
   4206 	case WM_T_82546:
   4207 	case WM_T_82546_3:
   4208 		delay(5*1000);
   4209 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4210 		break;
   4211 	case WM_T_82541:
   4212 	case WM_T_82541_2:
   4213 	case WM_T_82547:
   4214 	case WM_T_82547_2:
   4215 		delay(20000);
   4216 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4217 		break;
   4218 	case WM_T_82571:
   4219 	case WM_T_82572:
   4220 	case WM_T_82573:
   4221 	case WM_T_82574:
   4222 	case WM_T_82583:
   4223 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4224 			delay(10);
   4225 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4226 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4227 			CSR_WRITE_FLUSH(sc);
   4228 		}
   4229 		/* check EECD_EE_AUTORD */
   4230 		wm_get_auto_rd_done(sc);
   4231 		/*
   4232 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4233 		 * is set.
   4234 		 */
   4235 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4236 		    || (sc->sc_type == WM_T_82583))
   4237 			delay(25*1000);
   4238 		break;
   4239 	case WM_T_82575:
   4240 	case WM_T_82576:
   4241 	case WM_T_82580:
   4242 	case WM_T_I350:
   4243 	case WM_T_I354:
   4244 	case WM_T_I210:
   4245 	case WM_T_I211:
   4246 	case WM_T_80003:
   4247 		/* check EECD_EE_AUTORD */
   4248 		wm_get_auto_rd_done(sc);
   4249 		break;
   4250 	case WM_T_ICH8:
   4251 	case WM_T_ICH9:
   4252 	case WM_T_ICH10:
   4253 	case WM_T_PCH:
   4254 	case WM_T_PCH2:
   4255 	case WM_T_PCH_LPT:
   4256 	case WM_T_PCH_SPT:
   4257 		break;
   4258 	default:
   4259 		panic("%s: unknown type\n", __func__);
   4260 	}
   4261 
   4262 	/* Check whether EEPROM is present or not */
   4263 	switch (sc->sc_type) {
   4264 	case WM_T_82575:
   4265 	case WM_T_82576:
   4266 	case WM_T_82580:
   4267 	case WM_T_I350:
   4268 	case WM_T_I354:
   4269 	case WM_T_ICH8:
   4270 	case WM_T_ICH9:
   4271 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4272 			/* Not found */
   4273 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4274 			if (sc->sc_type == WM_T_82575)
   4275 				wm_reset_init_script_82575(sc);
   4276 		}
   4277 		break;
   4278 	default:
   4279 		break;
   4280 	}
   4281 
   4282 	if ((sc->sc_type == WM_T_82580)
   4283 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4284 		/* clear global device reset status bit */
   4285 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4286 	}
   4287 
   4288 	/* Clear any pending interrupt events. */
   4289 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4290 	reg = CSR_READ(sc, WMREG_ICR);
   4291 	if (sc->sc_nintrs > 1) {
   4292 		if (sc->sc_type != WM_T_82574) {
   4293 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4294 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4295 		} else
   4296 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4297 	}
   4298 
   4299 	/* reload sc_ctrl */
   4300 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4301 
   4302 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4303 		wm_set_eee_i350(sc);
   4304 
   4305 	/* Clear the host wakeup bit after lcd reset */
   4306 	if (sc->sc_type >= WM_T_PCH) {
   4307 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4308 		    BM_PORT_GEN_CFG);
   4309 		reg &= ~BM_WUC_HOST_WU_BIT;
   4310 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4311 		    BM_PORT_GEN_CFG, reg);
   4312 	}
   4313 
   4314 	/*
   4315 	 * For PCH, this write will make sure that any noise will be detected
   4316 	 * as a CRC error and be dropped rather than show up as a bad packet
   4317 	 * to the DMA engine
   4318 	 */
   4319 	if (sc->sc_type == WM_T_PCH)
   4320 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4321 
   4322 	if (sc->sc_type >= WM_T_82544)
   4323 		CSR_WRITE(sc, WMREG_WUC, 0);
   4324 
   4325 	wm_reset_mdicnfg_82580(sc);
   4326 
   4327 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4328 		wm_pll_workaround_i210(sc);
   4329 }
   4330 
   4331 /*
   4332  * wm_add_rxbuf:
   4333  *
   4334  *	Add a receive buffer to the indicated descriptor.
   4335  */
   4336 static int
   4337 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4338 {
   4339 	struct wm_softc *sc = rxq->rxq_sc;
   4340 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4341 	struct mbuf *m;
   4342 	int error;
   4343 
   4344 	KASSERT(mutex_owned(rxq->rxq_lock));
   4345 
   4346 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4347 	if (m == NULL)
   4348 		return ENOBUFS;
   4349 
   4350 	MCLGET(m, M_DONTWAIT);
   4351 	if ((m->m_flags & M_EXT) == 0) {
   4352 		m_freem(m);
   4353 		return ENOBUFS;
   4354 	}
   4355 
   4356 	if (rxs->rxs_mbuf != NULL)
   4357 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4358 
   4359 	rxs->rxs_mbuf = m;
   4360 
   4361 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4362 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4363 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4364 	if (error) {
   4365 		/* XXX XXX XXX */
   4366 		aprint_error_dev(sc->sc_dev,
   4367 		    "unable to load rx DMA map %d, error = %d\n",
   4368 		    idx, error);
   4369 		panic("wm_add_rxbuf");
   4370 	}
   4371 
   4372 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4373 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4374 
   4375 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4376 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4377 			wm_init_rxdesc(rxq, idx);
   4378 	} else
   4379 		wm_init_rxdesc(rxq, idx);
   4380 
   4381 	return 0;
   4382 }
   4383 
   4384 /*
   4385  * wm_rxdrain:
   4386  *
   4387  *	Drain the receive queue.
   4388  */
   4389 static void
   4390 wm_rxdrain(struct wm_rxqueue *rxq)
   4391 {
   4392 	struct wm_softc *sc = rxq->rxq_sc;
   4393 	struct wm_rxsoft *rxs;
   4394 	int i;
   4395 
   4396 	KASSERT(mutex_owned(rxq->rxq_lock));
   4397 
   4398 	for (i = 0; i < WM_NRXDESC; i++) {
   4399 		rxs = &rxq->rxq_soft[i];
   4400 		if (rxs->rxs_mbuf != NULL) {
   4401 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4402 			m_freem(rxs->rxs_mbuf);
   4403 			rxs->rxs_mbuf = NULL;
   4404 		}
   4405 	}
   4406 }
   4407 
   4408 
   4409 /*
   4410  * XXX copy from FreeBSD's sys/net/rss_config.c
   4411  */
   4412 /*
   4413  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4414  * effectiveness may be limited by algorithm choice and available entropy
   4415  * during the boot.
   4416  *
   4417  * XXXRW: And that we don't randomize it yet!
   4418  *
   4419  * This is the default Microsoft RSS specification key which is also
   4420  * the Chelsio T5 firmware default key.
   4421  */
   4422 #define RSS_KEYSIZE 40
   4423 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4424 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4425 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4426 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4427 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4428 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4429 };
   4430 
   4431 /*
   4432  * Caller must pass an array of size sizeof(rss_key).
   4433  *
   4434  * XXX
   4435  * As if_ixgbe may use this function, it should not be an
   4436  * if_wm specific function.
   4437  */
   4438 static void
   4439 wm_rss_getkey(uint8_t *key)
   4440 {
   4441 
   4442 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4443 }
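
/*
 * Typical use, as in wm_init_rss() below: the caller overlays the key
 * on the uint32_t words written to the RSSRK registers:
 *
 *	uint32_t rss_key[RSSRK_NUM_REGS];
 *
 *	wm_rss_getkey((uint8_t *)rss_key);
 *	for (i = 0; i < RSSRK_NUM_REGS; i++)
 *		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
 */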
   4444 
   4445 /*
   4446  * Setup registers for RSS.
   4447  *
   4448  * XXX VMDq is not supported yet.
   4449  */
   4450 static void
   4451 wm_init_rss(struct wm_softc *sc)
   4452 {
   4453 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4454 	int i;
   4455 
   4456 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4457 
   4458 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4459 		int qid, reta_ent;
   4460 
   4461 		qid  = i % sc->sc_nqueues;
   4462 		switch (sc->sc_type) {
   4463 		case WM_T_82574:
   4464 			reta_ent = __SHIFTIN(qid,
   4465 			    RETA_ENT_QINDEX_MASK_82574);
   4466 			break;
   4467 		case WM_T_82575:
   4468 			reta_ent = __SHIFTIN(qid,
   4469 			    RETA_ENT_QINDEX1_MASK_82575);
   4470 			break;
   4471 		default:
   4472 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4473 			break;
   4474 		}
   4475 
   4476 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4477 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4478 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4479 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4480 	}
   4481 
   4482 	wm_rss_getkey((uint8_t *)rss_key);
   4483 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4484 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4485 
   4486 	if (sc->sc_type == WM_T_82574)
   4487 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4488 	else
   4489 		mrqc = MRQC_ENABLE_RSS_MQ;
   4490 
   4491 	/*
   4492 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4493 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4494 	 */
   4495 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4496 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4497 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4498 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4499 
   4500 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4501 }
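
/*
 * RETA example (illustrative): with sc_nqueues == 4, the redirection
 * table above is filled with the repeating queue pattern 0,1,2,3,...,
 * so RSS hash results are spread evenly across the four queues.
 */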
   4502 
   4503 /*
   4504  * Adjust the TX and RX queue numbers which the system actually uses.
   4505  *
   4506  * The numbers are affected by the following parameters:
   4507  *     - The number of hardware queues
   4508  *     - The number of MSI-X vectors (= "nvectors" argument)
   4509  *     - ncpu
   4510  */
   4511 static void
   4512 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4513 {
   4514 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4515 
   4516 	if (nvectors < 2) {
   4517 		sc->sc_nqueues = 1;
   4518 		return;
   4519 	}
   4520 
   4521 	switch (sc->sc_type) {
   4522 	case WM_T_82572:
   4523 		hw_ntxqueues = 2;
   4524 		hw_nrxqueues = 2;
   4525 		break;
   4526 	case WM_T_82574:
   4527 		hw_ntxqueues = 2;
   4528 		hw_nrxqueues = 2;
   4529 		break;
   4530 	case WM_T_82575:
   4531 		hw_ntxqueues = 4;
   4532 		hw_nrxqueues = 4;
   4533 		break;
   4534 	case WM_T_82576:
   4535 		hw_ntxqueues = 16;
   4536 		hw_nrxqueues = 16;
   4537 		break;
   4538 	case WM_T_82580:
   4539 	case WM_T_I350:
   4540 	case WM_T_I354:
   4541 		hw_ntxqueues = 8;
   4542 		hw_nrxqueues = 8;
   4543 		break;
   4544 	case WM_T_I210:
   4545 		hw_ntxqueues = 4;
   4546 		hw_nrxqueues = 4;
   4547 		break;
   4548 	case WM_T_I211:
   4549 		hw_ntxqueues = 2;
   4550 		hw_nrxqueues = 2;
   4551 		break;
   4552 		/*
   4553 		 * As the ethernet controllers below do not support MSI-X,
   4554 		 * this driver does not use multiqueue on them.
   4555 		 *     - WM_T_80003
   4556 		 *     - WM_T_ICH8
   4557 		 *     - WM_T_ICH9
   4558 		 *     - WM_T_ICH10
   4559 		 *     - WM_T_PCH
   4560 		 *     - WM_T_PCH2
   4561 		 *     - WM_T_PCH_LPT
   4562 		 */
   4563 	default:
   4564 		hw_ntxqueues = 1;
   4565 		hw_nrxqueues = 1;
   4566 		break;
   4567 	}
   4568 
   4569 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4570 
   4571 	/*
   4572 	 * As more queues than MSI-X vectors cannot improve scaling, we
   4573 	 * limit the number of queues actually used.
   4574 	 */
   4575 	if (nvectors < hw_nqueues + 1) {
   4576 		sc->sc_nqueues = nvectors - 1;
   4577 	} else {
   4578 		sc->sc_nqueues = hw_nqueues;
   4579 	}
   4580 
   4581 	/*
   4582 	 * As more queues than CPUs cannot improve scaling, we limit
   4583 	 * the number of queues actually used.
   4584 	 */
   4585 	if (ncpu < sc->sc_nqueues)
   4586 		sc->sc_nqueues = ncpu;
   4587 }
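
/*
 * Worked example (illustrative): an 82576 (16 hardware queue pairs)
 * with nvectors == 5 and ncpu == 8 gives hw_nqueues == 16, but only
 * four vectors remain after reserving one for the link interrupt, so
 * sc_nqueues ends up as 4.
 */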
   4588 
   4589 static int
   4590 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4591 {
   4592 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4593 	wmq->wmq_id = qidx;
   4594 	wmq->wmq_intr_idx = intr_idx;
   4595 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4596 #ifdef WM_MPSAFE
   4597 	    | SOFTINT_MPSAFE
   4598 #endif
   4599 	    , wm_handle_queue, wmq);
   4600 	if (wmq->wmq_si != NULL)
   4601 		return 0;
   4602 
   4603 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4604 	    wmq->wmq_id);
   4605 
   4606 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4607 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4608 	return ENOMEM;
   4609 }
   4610 
   4611 /*
    4612  * Both single-interrupt MSI and INTx can use this function.
   4613  */
   4614 static int
   4615 wm_setup_legacy(struct wm_softc *sc)
   4616 {
   4617 	pci_chipset_tag_t pc = sc->sc_pc;
   4618 	const char *intrstr = NULL;
   4619 	char intrbuf[PCI_INTRSTR_LEN];
   4620 	int error;
   4621 
   4622 	error = wm_alloc_txrx_queues(sc);
   4623 	if (error) {
   4624 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4625 		    error);
   4626 		return ENOMEM;
   4627 	}
   4628 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4629 	    sizeof(intrbuf));
   4630 #ifdef WM_MPSAFE
   4631 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4632 #endif
   4633 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4634 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4635 	if (sc->sc_ihs[0] == NULL) {
    4636 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4637 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4638 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4639 		return ENOMEM;
   4640 	}
   4641 
   4642 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4643 	sc->sc_nintrs = 1;
   4644 
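         	/*
         	 * With a single INTx/MSI vector, queue 0's softint handles
         	 * all Tx/Rx completion work.
         	 */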
   4645 	return wm_softint_establish(sc, 0, 0);
   4646 }
   4647 
   4648 static int
   4649 wm_setup_msix(struct wm_softc *sc)
   4650 {
   4651 	void *vih;
   4652 	kcpuset_t *affinity;
   4653 	int qidx, error, intr_idx, txrx_established;
   4654 	pci_chipset_tag_t pc = sc->sc_pc;
   4655 	const char *intrstr = NULL;
   4656 	char intrbuf[PCI_INTRSTR_LEN];
   4657 	char intr_xname[INTRDEVNAMEBUF];
   4658 
   4659 	if (sc->sc_nqueues < ncpu) {
   4660 		/*
   4661 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4662 		 * interrupts starts at CPU#1.
   4663 		 */
   4664 		sc->sc_affinity_offset = 1;
   4665 	} else {
   4666 		/*
    4667 		 * In this case, this device uses all CPUs, so for readability
    4668 		 * we make the affinitized cpu_index equal the MSI-X vector number.
   4669 		 */
   4670 		sc->sc_affinity_offset = 0;
   4671 	}
   4672 
   4673 	error = wm_alloc_txrx_queues(sc);
   4674 	if (error) {
   4675 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4676 		    error);
   4677 		return ENOMEM;
   4678 	}
   4679 
   4680 	kcpuset_create(&affinity, false);
   4681 	intr_idx = 0;
   4682 
   4683 	/*
   4684 	 * TX and RX
   4685 	 */
   4686 	txrx_established = 0;
   4687 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4688 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4689 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4690 
   4691 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4692 		    sizeof(intrbuf));
   4693 #ifdef WM_MPSAFE
   4694 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4695 		    PCI_INTR_MPSAFE, true);
   4696 #endif
   4697 		memset(intr_xname, 0, sizeof(intr_xname));
   4698 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4699 		    device_xname(sc->sc_dev), qidx);
   4700 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4701 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4702 		if (vih == NULL) {
   4703 			aprint_error_dev(sc->sc_dev,
   4704 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4705 			    intrstr ? " at " : "",
   4706 			    intrstr ? intrstr : "");
   4707 
   4708 			goto fail;
   4709 		}
   4710 		kcpuset_zero(affinity);
   4711 		/* Round-robin affinity */
   4712 		kcpuset_set(affinity, affinity_to);
   4713 		error = interrupt_distribute(vih, affinity, NULL);
   4714 		if (error == 0) {
   4715 			aprint_normal_dev(sc->sc_dev,
   4716 			    "for TX and RX interrupting at %s affinity to %u\n",
   4717 			    intrstr, affinity_to);
   4718 		} else {
   4719 			aprint_normal_dev(sc->sc_dev,
   4720 			    "for TX and RX interrupting at %s\n", intrstr);
   4721 		}
   4722 		sc->sc_ihs[intr_idx] = vih;
   4723 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4724 			goto fail;
   4725 		txrx_established++;
   4726 		intr_idx++;
   4727 	}
   4728 
   4729 	/*
   4730 	 * LINK
   4731 	 */
   4732 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4733 	    sizeof(intrbuf));
   4734 #ifdef WM_MPSAFE
   4735 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4736 #endif
   4737 	memset(intr_xname, 0, sizeof(intr_xname));
   4738 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4739 	    device_xname(sc->sc_dev));
   4740 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
    4741 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4742 	if (vih == NULL) {
   4743 		aprint_error_dev(sc->sc_dev,
   4744 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4745 		    intrstr ? " at " : "",
   4746 		    intrstr ? intrstr : "");
   4747 
   4748 		goto fail;
   4749 	}
    4750 	/* Keep the default affinity for the LINK interrupt */
   4751 	aprint_normal_dev(sc->sc_dev,
   4752 	    "for LINK interrupting at %s\n", intrstr);
   4753 	sc->sc_ihs[intr_idx] = vih;
   4754 	sc->sc_link_intr_idx = intr_idx;
   4755 
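         	/*
         	 * Final vector layout: vectors 0 .. sc_nqueues - 1 are the
         	 * per-queue Tx/Rx interrupts; vector sc_nqueues is the link
         	 * interrupt.
         	 */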
   4756 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4757 	kcpuset_destroy(affinity);
   4758 	return 0;
   4759 
   4760  fail:
   4761 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4762 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4763 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4764 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4765 	}
   4766 
   4767 	kcpuset_destroy(affinity);
   4768 	return ENOMEM;
   4769 }
   4770 
   4771 static void
   4772 wm_turnon(struct wm_softc *sc)
   4773 {
   4774 	int i;
   4775 
   4776 	KASSERT(WM_CORE_LOCKED(sc));
   4777 
   4778 	/*
    4779 	 * Must unset the stopping flags in ascending order.
    4780 	 */
    4781 	for (i = 0; i < sc->sc_nqueues; i++) {
   4782 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4783 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4784 
   4785 		mutex_enter(txq->txq_lock);
   4786 		txq->txq_stopping = false;
   4787 		mutex_exit(txq->txq_lock);
   4788 
   4789 		mutex_enter(rxq->rxq_lock);
   4790 		rxq->rxq_stopping = false;
   4791 		mutex_exit(rxq->rxq_lock);
   4792 	}
   4793 
   4794 	sc->sc_core_stopping = false;
   4795 }
   4796 
   4797 static void
   4798 wm_turnoff(struct wm_softc *sc)
   4799 {
   4800 	int i;
   4801 
   4802 	KASSERT(WM_CORE_LOCKED(sc));
   4803 
   4804 	sc->sc_core_stopping = true;
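         	/*
         	 * Note the pairing with wm_turnon(): the core flag is set first
         	 * here and cleared last there, so a queue's stopping flag is
         	 * only ever set while sc_core_stopping is also set.
         	 */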
   4805 
   4806 	/*
    4807 	 * Must set the stopping flags in ascending order.
    4808 	 */
    4809 	for (i = 0; i < sc->sc_nqueues; i++) {
   4810 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4811 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4812 
   4813 		mutex_enter(rxq->rxq_lock);
   4814 		rxq->rxq_stopping = true;
   4815 		mutex_exit(rxq->rxq_lock);
   4816 
   4817 		mutex_enter(txq->txq_lock);
   4818 		txq->txq_stopping = true;
   4819 		mutex_exit(txq->txq_lock);
   4820 	}
   4821 }
   4822 
   4823 /*
    4824  * Write the interrupt interval value to the ITR or EITR register.
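          * For example, with NEWQUEUE's EITR a value of 450 corresponds to
          * about 1,000,000 / 450 ~= 2222 interrupts/sec, while the legacy
          * ITR counts 256 ns units, so 1500 corresponds to
          * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec (see the
          * sc_itr_init settings in wm_init_locked()).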
   4825  */
   4826 static void
   4827 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4828 {
   4829 
   4830 	if (!wmq->wmq_set_itr)
   4831 		return;
   4832 
   4833 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4834 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4835 
   4836 		/*
    4837 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    4838 		 * the counter field in software.
   4839 		 */
   4840 		if (sc->sc_type == WM_T_82575)
   4841 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4842 		else
   4843 			eitr |= EITR_CNT_INGR;
   4844 
   4845 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4846 	} else if (sc->sc_type == WM_T_82574 && sc->sc_nintrs > 1) {
   4847 		/*
    4848 		 * The 82574 has both ITR and EITR. Set EITR when we use
    4849 		 * the multiqueue feature with MSI-X.
   4850 		 */
   4851 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4852 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4853 	} else {
   4854 		KASSERT(wmq->wmq_id == 0);
   4855 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4856 	}
   4857 
   4858 	wmq->wmq_set_itr = false;
   4859 }
   4860 
   4861 /*
   4862  * TODO
    4863  * The dynamic ITR calculation below is almost the same as Linux igb's;
    4864  * however, it does not fit wm(4), so AIM remains disabled until we
    4865  * find an appropriate ITR calculation.
    4866  */
    4867 /*
    4868  * Calculate the interrupt interval value to be written to the register
    4869  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
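          * The computed value takes effect through wmq->wmq_set_itr, which
          * makes wm_itrs_writereg() push it to the hardware on a later call.
          * (The body below is currently compiled out under NOTYET.)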
   4870  */
   4871 static void
   4872 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4873 {
   4874 #ifdef NOTYET
   4875 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4876 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4877 	uint32_t avg_size = 0;
   4878 	uint32_t new_itr;
   4879 
   4880 	if (rxq->rxq_packets)
   4881 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   4882 	if (txq->txq_packets)
   4883 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4884 
   4885 	if (avg_size == 0) {
   4886 		new_itr = 450; /* restore default value */
   4887 		goto out;
   4888 	}
   4889 
   4890 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   4891 	avg_size += 24;
   4892 
   4893 	/* Don't starve jumbo frames */
   4894 	avg_size = min(avg_size, 3000);
   4895 
   4896 	/* Give a little boost to mid-size frames */
   4897 	if ((avg_size > 300) && (avg_size < 1200))
   4898 		new_itr = avg_size / 3;
   4899 	else
   4900 		new_itr = avg_size / 2;
   4901 
   4902 out:
   4903 	/*
    4904 	 * The usage of the 82574's and 82575's EITR differs from other NEWQUEUE
   4905 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   4906 	 */
   4907 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   4908 		new_itr *= 4;
   4909 
   4910 	if (new_itr != wmq->wmq_itr) {
   4911 		wmq->wmq_itr = new_itr;
   4912 		wmq->wmq_set_itr = true;
   4913 	} else
   4914 		wmq->wmq_set_itr = false;
   4915 
   4916 	rxq->rxq_packets = 0;
   4917 	rxq->rxq_bytes = 0;
   4918 	txq->txq_packets = 0;
   4919 	txq->txq_bytes = 0;
   4920 #endif
   4921 }
   4922 
   4923 /*
   4924  * wm_init:		[ifnet interface function]
   4925  *
   4926  *	Initialize the interface.
   4927  */
   4928 static int
   4929 wm_init(struct ifnet *ifp)
   4930 {
   4931 	struct wm_softc *sc = ifp->if_softc;
   4932 	int ret;
   4933 
   4934 	WM_CORE_LOCK(sc);
   4935 	ret = wm_init_locked(ifp);
   4936 	WM_CORE_UNLOCK(sc);
   4937 
   4938 	return ret;
   4939 }
   4940 
   4941 static int
   4942 wm_init_locked(struct ifnet *ifp)
   4943 {
   4944 	struct wm_softc *sc = ifp->if_softc;
   4945 	int i, j, trynum, error = 0;
   4946 	uint32_t reg;
   4947 
   4948 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4949 		device_xname(sc->sc_dev), __func__));
   4950 	KASSERT(WM_CORE_LOCKED(sc));
   4951 
   4952 	/*
    4953 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4954 	 * There is a small but measurable benefit to avoiding the adjustment
   4955 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4956 	 * on such platforms.  One possibility is that the DMA itself is
   4957 	 * slightly more efficient if the front of the entire packet (instead
   4958 	 * of the front of the headers) is aligned.
   4959 	 *
   4960 	 * Note we must always set align_tweak to 0 if we are using
   4961 	 * jumbo frames.
   4962 	 */
   4963 #ifdef __NO_STRICT_ALIGNMENT
   4964 	sc->sc_align_tweak = 0;
   4965 #else
   4966 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4967 		sc->sc_align_tweak = 0;
   4968 	else
   4969 		sc->sc_align_tweak = 2;
   4970 #endif /* __NO_STRICT_ALIGNMENT */
   4971 
   4972 	/* Cancel any pending I/O. */
   4973 	wm_stop_locked(ifp, 0);
   4974 
   4975 	/* update statistics before reset */
   4976 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4977 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4978 
   4979 	/* PCH_SPT hardware workaround */
   4980 	if (sc->sc_type == WM_T_PCH_SPT)
   4981 		wm_flush_desc_rings(sc);
   4982 
   4983 	/* Reset the chip to a known state. */
   4984 	wm_reset(sc);
   4985 
   4986 	/* AMT based hardware can now take control from firmware */
   4987 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4988 		wm_get_hw_control(sc);
   4989 
   4990 	/* Init hardware bits */
   4991 	wm_initialize_hardware_bits(sc);
   4992 
   4993 	/* Reset the PHY. */
   4994 	if (sc->sc_flags & WM_F_HAS_MII)
   4995 		wm_gmii_reset(sc);
   4996 
   4997 	/* Calculate (E)ITR value */
   4998 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   4999 		/*
    5000 		 * For NEWQUEUE's EITR (except for the 82575).
    5001 		 * The 82575's EITR should be set to the same throttling value
    5002 		 * as older controllers' ITR because the interrupts/sec
    5003 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5004 		 *
    5005 		 * The 82574's EITR should be set to the same value as its ITR.
    5006 		 *
    5007 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    5008 		 * in contrast to the ITR throttling value.
   5009 		 */
   5010 		sc->sc_itr_init = 450;
   5011 	} else if (sc->sc_type >= WM_T_82543) {
   5012 		/*
   5013 		 * Set up the interrupt throttling register (units of 256ns)
   5014 		 * Note that a footnote in Intel's documentation says this
   5015 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5016 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5017 		 * that that is also true for the 1024ns units of the other
   5018 		 * interrupt-related timer registers -- so, really, we ought
   5019 		 * to divide this value by 4 when the link speed is low.
   5020 		 *
   5021 		 * XXX implement this division at link speed change!
   5022 		 */
   5023 
   5024 		/*
   5025 		 * For N interrupts/sec, set this value to:
   5026 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5027 		 * absolute and packet timer values to this value
   5028 		 * divided by 4 to get "simple timer" behavior.
   5029 		 */
   5030 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5031 	}
   5032 
   5033 	error = wm_init_txrx_queues(sc);
   5034 	if (error)
   5035 		goto out;
   5036 
   5037 	/*
   5038 	 * Clear out the VLAN table -- we don't use it (yet).
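         	 * (The VFTA is a bit vector with one bit per VLAN ID, so
         	 * writing zero to every entry clears the whole table.)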
   5039 	 */
   5040 	CSR_WRITE(sc, WMREG_VET, 0);
   5041 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5042 		trynum = 10; /* Due to hw errata */
   5043 	else
   5044 		trynum = 1;
   5045 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5046 		for (j = 0; j < trynum; j++)
   5047 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5048 
   5049 	/*
   5050 	 * Set up flow-control parameters.
   5051 	 *
   5052 	 * XXX Values could probably stand some tuning.
   5053 	 */
   5054 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5055 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5056 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5057 	    && (sc->sc_type != WM_T_PCH_SPT)) {
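         		/*
         		 * (FCAL/FCAH hold the IEEE 802.3x PAUSE destination
         		 * address 01:80:c2:00:00:01 and FCT the PAUSE
         		 * Ethertype 0x8808.)
         		 */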
   5058 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5059 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5060 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5061 	}
   5062 
   5063 	sc->sc_fcrtl = FCRTL_DFLT;
   5064 	if (sc->sc_type < WM_T_82543) {
   5065 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5066 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5067 	} else {
   5068 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5069 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5070 	}
   5071 
   5072 	if (sc->sc_type == WM_T_80003)
   5073 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5074 	else
   5075 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5076 
   5077 	/* Writes the control register. */
   5078 	wm_set_vlan(sc);
   5079 
   5080 	if (sc->sc_flags & WM_F_HAS_MII) {
   5081 		int val;
   5082 
   5083 		switch (sc->sc_type) {
   5084 		case WM_T_80003:
   5085 		case WM_T_ICH8:
   5086 		case WM_T_ICH9:
   5087 		case WM_T_ICH10:
   5088 		case WM_T_PCH:
   5089 		case WM_T_PCH2:
   5090 		case WM_T_PCH_LPT:
   5091 		case WM_T_PCH_SPT:
   5092 			/*
    5093 			 * Set the MAC to wait the maximum time between each
   5094 			 * iteration and increase the max iterations when
   5095 			 * polling the phy; this fixes erroneous timeouts at
   5096 			 * 10Mbps.
   5097 			 */
   5098 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5099 			    0xFFFF);
   5100 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5101 			val |= 0x3F;
   5102 			wm_kmrn_writereg(sc,
   5103 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5104 			break;
   5105 		default:
   5106 			break;
   5107 		}
   5108 
   5109 		if (sc->sc_type == WM_T_80003) {
   5110 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5111 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5112 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5113 
   5114 			/* Bypass RX and TX FIFO's */
   5115 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5116 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5117 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5118 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5119 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5120 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5121 		}
   5122 	}
   5123 #if 0
   5124 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5125 #endif
   5126 
   5127 	/* Set up checksum offload parameters. */
   5128 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5129 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5130 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5131 		reg |= RXCSUM_IPOFL;
   5132 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5133 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5134 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5135 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5136 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5137 
   5138 	/* Set up MSI-X */
   5139 	if (sc->sc_nintrs > 1) {
   5140 		uint32_t ivar;
   5141 		struct wm_queue *wmq;
   5142 		int qid, qintr_idx;
   5143 
   5144 		if (sc->sc_type == WM_T_82575) {
   5145 			/* Interrupt control */
   5146 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5147 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5148 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5149 
   5150 			/* TX and RX */
   5151 			for (i = 0; i < sc->sc_nqueues; i++) {
   5152 				wmq = &sc->sc_queue[i];
   5153 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5154 				    EITR_TX_QUEUE(wmq->wmq_id)
   5155 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5156 			}
   5157 			/* Link status */
   5158 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5159 			    EITR_OTHER);
   5160 		} else if (sc->sc_type == WM_T_82574) {
   5161 			/* Interrupt control */
   5162 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5163 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5164 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5165 
   5166 			/*
    5167 			 * Work around an issue with spurious interrupts
    5168 			 * in MSI-X mode.
    5169 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5170 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5171 			 */
   5172 			reg = CSR_READ(sc, WMREG_RFCTL);
   5173 			reg |= WMREG_RFCTL_ACKDIS;
   5174 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5175 
   5176 			ivar = 0;
   5177 			/* TX and RX */
   5178 			for (i = 0; i < sc->sc_nqueues; i++) {
   5179 				wmq = &sc->sc_queue[i];
   5180 				qid = wmq->wmq_id;
   5181 				qintr_idx = wmq->wmq_intr_idx;
   5182 
   5183 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5184 				    IVAR_TX_MASK_Q_82574(qid));
   5185 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5186 				    IVAR_RX_MASK_Q_82574(qid));
   5187 			}
   5188 			/* Link status */
   5189 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5190 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5191 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5192 		} else {
   5193 			/* Interrupt control */
   5194 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5195 			    | GPIE_EIAME | GPIE_PBA);
   5196 
   5197 			switch (sc->sc_type) {
   5198 			case WM_T_82580:
   5199 			case WM_T_I350:
   5200 			case WM_T_I354:
   5201 			case WM_T_I210:
   5202 			case WM_T_I211:
   5203 				/* TX and RX */
   5204 				for (i = 0; i < sc->sc_nqueues; i++) {
   5205 					wmq = &sc->sc_queue[i];
   5206 					qid = wmq->wmq_id;
   5207 					qintr_idx = wmq->wmq_intr_idx;
   5208 
   5209 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5210 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5211 					ivar |= __SHIFTIN((qintr_idx
   5212 						| IVAR_VALID),
   5213 					    IVAR_TX_MASK_Q(qid));
   5214 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5215 					ivar |= __SHIFTIN((qintr_idx
   5216 						| IVAR_VALID),
   5217 					    IVAR_RX_MASK_Q(qid));
   5218 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5219 				}
   5220 				break;
   5221 			case WM_T_82576:
   5222 				/* TX and RX */
   5223 				for (i = 0; i < sc->sc_nqueues; i++) {
   5224 					wmq = &sc->sc_queue[i];
   5225 					qid = wmq->wmq_id;
   5226 					qintr_idx = wmq->wmq_intr_idx;
   5227 
   5228 					ivar = CSR_READ(sc,
   5229 					    WMREG_IVAR_Q_82576(qid));
   5230 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5231 					ivar |= __SHIFTIN((qintr_idx
   5232 						| IVAR_VALID),
   5233 					    IVAR_TX_MASK_Q_82576(qid));
   5234 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5235 					ivar |= __SHIFTIN((qintr_idx
   5236 						| IVAR_VALID),
   5237 					    IVAR_RX_MASK_Q_82576(qid));
   5238 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5239 					    ivar);
   5240 				}
   5241 				break;
   5242 			default:
   5243 				break;
   5244 			}
   5245 
   5246 			/* Link status */
   5247 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5248 			    IVAR_MISC_OTHER);
   5249 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5250 		}
   5251 
   5252 		if (sc->sc_nqueues > 1) {
   5253 			wm_init_rss(sc);
   5254 
   5255 			/*
    5256 			 * NOTE: Receive Full-Packet Checksum Offload
    5257 			 * is mutually exclusive with Multiqueue. However,
    5258 			 * this is not the same as the TCP/IP checksums,
    5259 			 * which still work.
    5260 			 */
   5261 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5262 			reg |= RXCSUM_PCSD;
   5263 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5264 		}
   5265 	}
   5266 
   5267 	/* Set up the interrupt registers. */
   5268 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5269 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5270 	    ICR_RXO | ICR_RXT0;
   5271 	if (sc->sc_nintrs > 1) {
   5272 		uint32_t mask;
   5273 		struct wm_queue *wmq;
   5274 
   5275 		switch (sc->sc_type) {
   5276 		case WM_T_82574:
   5277 			mask = 0;
   5278 			for (i = 0; i < sc->sc_nqueues; i++) {
   5279 				wmq = &sc->sc_queue[i];
   5280 				mask |= ICR_TXQ(wmq->wmq_id);
   5281 				mask |= ICR_RXQ(wmq->wmq_id);
   5282 			}
   5283 			mask |= ICR_OTHER;
   5284 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5285 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5286 			break;
   5287 		default:
   5288 			if (sc->sc_type == WM_T_82575) {
   5289 				mask = 0;
   5290 				for (i = 0; i < sc->sc_nqueues; i++) {
   5291 					wmq = &sc->sc_queue[i];
   5292 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5293 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5294 				}
   5295 				mask |= EITR_OTHER;
   5296 			} else {
   5297 				mask = 0;
   5298 				for (i = 0; i < sc->sc_nqueues; i++) {
   5299 					wmq = &sc->sc_queue[i];
   5300 					mask |= 1 << wmq->wmq_intr_idx;
   5301 				}
   5302 				mask |= 1 << sc->sc_link_intr_idx;
   5303 			}
   5304 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5305 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5306 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5307 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5308 			break;
   5309 		}
   5310 	} else
   5311 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5312 
   5313 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5314 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5315 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5316 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5317 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5318 		reg |= KABGTXD_BGSQLBIAS;
   5319 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5320 	}
   5321 
   5322 	/* Set up the inter-packet gap. */
   5323 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5324 
   5325 	if (sc->sc_type >= WM_T_82543) {
   5326 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5327 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5328 			wm_itrs_writereg(sc, wmq);
   5329 		}
   5330 		/*
    5331 		 * Link interrupts occur much less frequently than TX
    5332 		 * and RX interrupts, so we don't tune the
    5333 		 * EITR(WM_MSIX_LINKINTR_IDX) value as
    5334 		 * FreeBSD's if_igb does.
   5335 		 */
   5336 	}
   5337 
   5338 	/* Set the VLAN ethernetype. */
   5339 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5340 
   5341 	/*
   5342 	 * Set up the transmit control register; we start out with
    5343 	 * a collision distance suitable for FDX, but update it when
   5344 	 * we resolve the media type.
   5345 	 */
   5346 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5347 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5348 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5349 	if (sc->sc_type >= WM_T_82571)
   5350 		sc->sc_tctl |= TCTL_MULR;
   5351 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5352 
   5353 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5354 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5355 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5356 	}
   5357 
   5358 	if (sc->sc_type == WM_T_80003) {
   5359 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5360 		reg &= ~TCTL_EXT_GCEX_MASK;
   5361 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5362 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5363 	}
   5364 
   5365 	/* Set the media. */
   5366 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5367 		goto out;
   5368 
   5369 	/* Configure for OS presence */
   5370 	wm_init_manageability(sc);
   5371 
   5372 	/*
   5373 	 * Set up the receive control register; we actually program
   5374 	 * the register when we set the receive filter.  Use multicast
   5375 	 * address offset type 0.
   5376 	 *
   5377 	 * Only the i82544 has the ability to strip the incoming
   5378 	 * CRC, so we don't enable that feature.
   5379 	 */
   5380 	sc->sc_mchash_type = 0;
   5381 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5382 	    | RCTL_MO(sc->sc_mchash_type);
   5383 
   5384 	/*
    5385 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5386 	 */
   5387 	if (sc->sc_type == WM_T_82574)
   5388 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5389 
   5390 	/*
   5391 	 * The I350 has a bug where it always strips the CRC whether
    5392 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5393 	 */
   5394 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5395 	    || (sc->sc_type == WM_T_I210))
   5396 		sc->sc_rctl |= RCTL_SECRC;
   5397 
   5398 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5399 	    && (ifp->if_mtu > ETHERMTU)) {
   5400 		sc->sc_rctl |= RCTL_LPE;
   5401 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5402 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5403 	}
   5404 
   5405 	if (MCLBYTES == 2048) {
   5406 		sc->sc_rctl |= RCTL_2k;
   5407 	} else {
   5408 		if (sc->sc_type >= WM_T_82543) {
   5409 			switch (MCLBYTES) {
   5410 			case 4096:
   5411 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5412 				break;
   5413 			case 8192:
   5414 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5415 				break;
   5416 			case 16384:
   5417 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5418 				break;
   5419 			default:
   5420 				panic("wm_init: MCLBYTES %d unsupported",
   5421 				    MCLBYTES);
   5422 				break;
   5423 			}
   5424 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5425 	}
   5426 
   5427 	/* Set the receive filter. */
   5428 	wm_set_filter(sc);
   5429 
   5430 	/* Enable ECC */
   5431 	switch (sc->sc_type) {
   5432 	case WM_T_82571:
   5433 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5434 		reg |= PBA_ECC_CORR_EN;
   5435 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5436 		break;
   5437 	case WM_T_PCH_LPT:
   5438 	case WM_T_PCH_SPT:
   5439 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5440 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5441 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5442 
   5443 		sc->sc_ctrl |= CTRL_MEHE;
   5444 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5445 		break;
   5446 	default:
   5447 		break;
   5448 	}
   5449 
    5450 	/* On 82575 and later, set RDT only if RX is enabled */
   5451 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5452 		int qidx;
   5453 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5454 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5455 			for (i = 0; i < WM_NRXDESC; i++) {
   5456 				mutex_enter(rxq->rxq_lock);
   5457 				wm_init_rxdesc(rxq, i);
    5458 				mutex_exit(rxq->rxq_lock);
    5460 			}
   5461 		}
   5462 	}
   5463 
   5464 	wm_turnon(sc);
   5465 
   5466 	/* Start the one second link check clock. */
   5467 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5468 
   5469 	/* ...all done! */
   5470 	ifp->if_flags |= IFF_RUNNING;
   5471 	ifp->if_flags &= ~IFF_OACTIVE;
   5472 
   5473  out:
   5474 	sc->sc_if_flags = ifp->if_flags;
   5475 	if (error)
   5476 		log(LOG_ERR, "%s: interface not running\n",
   5477 		    device_xname(sc->sc_dev));
   5478 	return error;
   5479 }
   5480 
   5481 /*
   5482  * wm_stop:		[ifnet interface function]
   5483  *
   5484  *	Stop transmission on the interface.
   5485  */
   5486 static void
   5487 wm_stop(struct ifnet *ifp, int disable)
   5488 {
   5489 	struct wm_softc *sc = ifp->if_softc;
   5490 
   5491 	WM_CORE_LOCK(sc);
   5492 	wm_stop_locked(ifp, disable);
   5493 	WM_CORE_UNLOCK(sc);
   5494 }
   5495 
   5496 static void
   5497 wm_stop_locked(struct ifnet *ifp, int disable)
   5498 {
   5499 	struct wm_softc *sc = ifp->if_softc;
   5500 	struct wm_txsoft *txs;
   5501 	int i, qidx;
   5502 
   5503 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5504 		device_xname(sc->sc_dev), __func__));
   5505 	KASSERT(WM_CORE_LOCKED(sc));
   5506 
   5507 	wm_turnoff(sc);
   5508 
   5509 	/* Stop the one second clock. */
   5510 	callout_stop(&sc->sc_tick_ch);
   5511 
   5512 	/* Stop the 82547 Tx FIFO stall check timer. */
   5513 	if (sc->sc_type == WM_T_82547)
   5514 		callout_stop(&sc->sc_txfifo_ch);
   5515 
   5516 	if (sc->sc_flags & WM_F_HAS_MII) {
   5517 		/* Down the MII. */
   5518 		mii_down(&sc->sc_mii);
   5519 	} else {
   5520 #if 0
   5521 		/* Should we clear PHY's status properly? */
   5522 		wm_reset(sc);
   5523 #endif
   5524 	}
   5525 
   5526 	/* Stop the transmit and receive processes. */
   5527 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5528 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5529 	sc->sc_rctl &= ~RCTL_EN;
   5530 
   5531 	/*
   5532 	 * Clear the interrupt mask to ensure the device cannot assert its
   5533 	 * interrupt line.
   5534 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5535 	 * service any currently pending or shared interrupt.
   5536 	 */
   5537 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5538 	sc->sc_icr = 0;
   5539 	if (sc->sc_nintrs > 1) {
   5540 		if (sc->sc_type != WM_T_82574) {
   5541 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5542 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5543 		} else
   5544 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5545 	}
   5546 
   5547 	/* Release any queued transmit buffers. */
   5548 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5549 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5550 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5551 		mutex_enter(txq->txq_lock);
   5552 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5553 			txs = &txq->txq_soft[i];
   5554 			if (txs->txs_mbuf != NULL) {
   5555 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5556 				m_freem(txs->txs_mbuf);
   5557 				txs->txs_mbuf = NULL;
   5558 			}
   5559 		}
   5560 		mutex_exit(txq->txq_lock);
   5561 	}
   5562 
   5563 	/* Mark the interface as down and cancel the watchdog timer. */
   5564 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5565 	ifp->if_timer = 0;
   5566 
   5567 	if (disable) {
   5568 		for (i = 0; i < sc->sc_nqueues; i++) {
   5569 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5570 			mutex_enter(rxq->rxq_lock);
   5571 			wm_rxdrain(rxq);
   5572 			mutex_exit(rxq->rxq_lock);
   5573 		}
   5574 	}
   5575 
   5576 #if 0 /* notyet */
   5577 	if (sc->sc_type >= WM_T_82544)
   5578 		CSR_WRITE(sc, WMREG_WUC, 0);
   5579 #endif
   5580 }
   5581 
   5582 static void
   5583 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5584 {
   5585 	struct mbuf *m;
   5586 	int i;
   5587 
   5588 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5589 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5590 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5591 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5592 		    m->m_data, m->m_len, m->m_flags);
   5593 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5594 	    i, i == 1 ? "" : "s");
   5595 }
   5596 
   5597 /*
   5598  * wm_82547_txfifo_stall:
   5599  *
   5600  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5601  *	reset the FIFO pointers, and restart packet transmission.
   5602  */
   5603 static void
   5604 wm_82547_txfifo_stall(void *arg)
   5605 {
   5606 	struct wm_softc *sc = arg;
   5607 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5608 
   5609 	mutex_enter(txq->txq_lock);
   5610 
   5611 	if (txq->txq_stopping)
   5612 		goto out;
   5613 
   5614 	if (txq->txq_fifo_stall) {
   5615 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5616 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5617 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5618 			/*
   5619 			 * Packets have drained.  Stop transmitter, reset
   5620 			 * FIFO pointers, restart transmitter, and kick
   5621 			 * the packet queue.
   5622 			 */
   5623 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5624 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5625 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5626 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5627 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5628 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5629 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5630 			CSR_WRITE_FLUSH(sc);
   5631 
   5632 			txq->txq_fifo_head = 0;
   5633 			txq->txq_fifo_stall = 0;
   5634 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5635 		} else {
   5636 			/*
   5637 			 * Still waiting for packets to drain; try again in
   5638 			 * another tick.
   5639 			 */
   5640 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5641 		}
   5642 	}
   5643 
   5644 out:
   5645 	mutex_exit(txq->txq_lock);
   5646 }
   5647 
   5648 /*
   5649  * wm_82547_txfifo_bugchk:
   5650  *
   5651  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5652  *	prevent enqueueing a packet that would wrap around the end
    5653  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5654  *
   5655  *	We do this by checking the amount of space before the end
   5656  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5657  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5658  *	the internal FIFO pointers to the beginning, and restart
   5659  *	transmission on the interface.
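          *
          *	Returns non-zero when the packet must be held back (a stall
          *	is in progress or was just triggered); zero when it is safe
          *	to enqueue, in which case the software FIFO head is advanced.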
   5660  */
   5661 #define	WM_FIFO_HDR		0x10
   5662 #define	WM_82547_PAD_LEN	0x3e0
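         /*
          * Each packet occupies its length plus a WM_FIFO_HDR-byte header,
          * rounded up to WM_FIFO_HDR granularity.  A stall is forced once a
          * packet would exceed the remaining FIFO space by WM_82547_PAD_LEN
          * or more.
          */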
   5663 static int
   5664 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5665 {
   5666 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5667 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5668 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5669 
   5670 	/* Just return if already stalled. */
   5671 	if (txq->txq_fifo_stall)
   5672 		return 1;
   5673 
   5674 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5675 		/* Stall only occurs in half-duplex mode. */
   5676 		goto send_packet;
   5677 	}
   5678 
   5679 	if (len >= WM_82547_PAD_LEN + space) {
   5680 		txq->txq_fifo_stall = 1;
   5681 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5682 		return 1;
   5683 	}
   5684 
   5685  send_packet:
   5686 	txq->txq_fifo_head += len;
   5687 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5688 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5689 
   5690 	return 0;
   5691 }
   5692 
   5693 static int
   5694 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5695 {
   5696 	int error;
   5697 
   5698 	/*
   5699 	 * Allocate the control data structures, and create and load the
   5700 	 * DMA map for it.
   5701 	 *
   5702 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5703 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5704 	 * both sets within the same 4G segment.
   5705 	 */
   5706 	if (sc->sc_type < WM_T_82544)
   5707 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5708 	else
   5709 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5710 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5711 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5712 	else
   5713 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5714 
   5715 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5716 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5717 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5718 		aprint_error_dev(sc->sc_dev,
   5719 		    "unable to allocate TX control data, error = %d\n",
   5720 		    error);
   5721 		goto fail_0;
   5722 	}
   5723 
   5724 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5725 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5726 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5727 		aprint_error_dev(sc->sc_dev,
   5728 		    "unable to map TX control data, error = %d\n", error);
   5729 		goto fail_1;
   5730 	}
   5731 
   5732 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5733 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5734 		aprint_error_dev(sc->sc_dev,
   5735 		    "unable to create TX control data DMA map, error = %d\n",
   5736 		    error);
   5737 		goto fail_2;
   5738 	}
   5739 
   5740 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5741 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5742 		aprint_error_dev(sc->sc_dev,
   5743 		    "unable to load TX control data DMA map, error = %d\n",
   5744 		    error);
   5745 		goto fail_3;
   5746 	}
   5747 
   5748 	return 0;
   5749 
   5750  fail_3:
   5751 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5752  fail_2:
   5753 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5754 	    WM_TXDESCS_SIZE(txq));
   5755  fail_1:
   5756 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5757  fail_0:
   5758 	return error;
   5759 }
   5760 
   5761 static void
   5762 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5763 {
   5764 
   5765 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5766 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5767 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5768 	    WM_TXDESCS_SIZE(txq));
   5769 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5770 }
   5771 
   5772 static int
   5773 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5774 {
   5775 	int error;
   5776 	size_t rxq_descs_size;
   5777 
   5778 	/*
   5779 	 * Allocate the control data structures, and create and load the
   5780 	 * DMA map for it.
   5781 	 *
   5782 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5783 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5784 	 * both sets within the same 4G segment.
   5785 	 */
   5786 	rxq->rxq_ndesc = WM_NRXDESC;
   5787 	if (sc->sc_type == WM_T_82574)
   5788 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5789 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5790 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5791 	else
   5792 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5793 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5794 
   5795 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5796 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5797 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5798 		aprint_error_dev(sc->sc_dev,
   5799 		    "unable to allocate RX control data, error = %d\n",
   5800 		    error);
   5801 		goto fail_0;
   5802 	}
   5803 
   5804 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5805 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5806 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5807 		aprint_error_dev(sc->sc_dev,
   5808 		    "unable to map RX control data, error = %d\n", error);
   5809 		goto fail_1;
   5810 	}
   5811 
   5812 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5813 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5814 		aprint_error_dev(sc->sc_dev,
   5815 		    "unable to create RX control data DMA map, error = %d\n",
   5816 		    error);
   5817 		goto fail_2;
   5818 	}
   5819 
   5820 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5821 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5822 		aprint_error_dev(sc->sc_dev,
   5823 		    "unable to load RX control data DMA map, error = %d\n",
   5824 		    error);
   5825 		goto fail_3;
   5826 	}
   5827 
   5828 	return 0;
   5829 
   5830  fail_3:
   5831 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5832  fail_2:
   5833 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5834 	    rxq_descs_size);
   5835  fail_1:
   5836 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5837  fail_0:
   5838 	return error;
   5839 }
   5840 
   5841 static void
   5842 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5843 {
   5844 
   5845 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5846 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5847 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5848 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5849 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5850 }
   5851 
   5852 
   5853 static int
   5854 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5855 {
   5856 	int i, error;
   5857 
   5858 	/* Create the transmit buffer DMA maps. */
   5859 	WM_TXQUEUELEN(txq) =
   5860 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5861 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
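         	/*
         	 * XXX The 82547 gets a smaller software queue, presumably
         	 * because of the Tx FIFO bug handled in
         	 * wm_82547_txfifo_bugchk() above.
         	 */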
   5862 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5863 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5864 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5865 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5866 			aprint_error_dev(sc->sc_dev,
   5867 			    "unable to create Tx DMA map %d, error = %d\n",
   5868 			    i, error);
   5869 			goto fail;
   5870 		}
   5871 	}
   5872 
   5873 	return 0;
   5874 
   5875  fail:
   5876 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5877 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5878 			bus_dmamap_destroy(sc->sc_dmat,
   5879 			    txq->txq_soft[i].txs_dmamap);
   5880 	}
   5881 	return error;
   5882 }
   5883 
   5884 static void
   5885 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5886 {
   5887 	int i;
   5888 
   5889 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5890 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5891 			bus_dmamap_destroy(sc->sc_dmat,
   5892 			    txq->txq_soft[i].txs_dmamap);
   5893 	}
   5894 }
   5895 
   5896 static int
   5897 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5898 {
   5899 	int i, error;
   5900 
   5901 	/* Create the receive buffer DMA maps. */
   5902 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5903 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5904 			    MCLBYTES, 0, 0,
   5905 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5906 			aprint_error_dev(sc->sc_dev,
   5907 			    "unable to create Rx DMA map %d error = %d\n",
   5908 			    i, error);
   5909 			goto fail;
   5910 		}
   5911 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5912 	}
   5913 
   5914 	return 0;
   5915 
   5916  fail:
   5917 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5918 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5919 			bus_dmamap_destroy(sc->sc_dmat,
   5920 			    rxq->rxq_soft[i].rxs_dmamap);
   5921 	}
   5922 	return error;
   5923 }
   5924 
   5925 static void
   5926 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5927 {
   5928 	int i;
   5929 
   5930 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5931 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5932 			bus_dmamap_destroy(sc->sc_dmat,
   5933 			    rxq->rxq_soft[i].rxs_dmamap);
   5934 	}
   5935 }
   5936 
   5937 /*
    5938  * wm_alloc_txrx_queues:
   5939  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5940  */
   5941 static int
   5942 wm_alloc_txrx_queues(struct wm_softc *sc)
   5943 {
   5944 	int i, error, tx_done, rx_done;
   5945 
   5946 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5947 	    KM_SLEEP);
   5948 	if (sc->sc_queue == NULL) {
    5949 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5950 		error = ENOMEM;
   5951 		goto fail_0;
   5952 	}
   5953 
   5954 	/*
   5955 	 * For transmission
   5956 	 */
   5957 	error = 0;
   5958 	tx_done = 0;
   5959 	for (i = 0; i < sc->sc_nqueues; i++) {
   5960 #ifdef WM_EVENT_COUNTERS
   5961 		int j;
   5962 		const char *xname;
   5963 #endif
   5964 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5965 		txq->txq_sc = sc;
   5966 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5967 
   5968 		error = wm_alloc_tx_descs(sc, txq);
   5969 		if (error)
   5970 			break;
   5971 		error = wm_alloc_tx_buffer(sc, txq);
   5972 		if (error) {
   5973 			wm_free_tx_descs(sc, txq);
   5974 			break;
   5975 		}
   5976 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5977 		if (txq->txq_interq == NULL) {
   5978 			wm_free_tx_descs(sc, txq);
   5979 			wm_free_tx_buffer(sc, txq);
   5980 			error = ENOMEM;
   5981 			break;
   5982 		}
   5983 
   5984 #ifdef WM_EVENT_COUNTERS
   5985 		xname = device_xname(sc->sc_dev);
   5986 
   5987 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5988 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5989 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5990 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5991 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5992 
   5993 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5994 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5995 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5996 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5997 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5998 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5999 
   6000 		for (j = 0; j < WM_NTXSEGS; j++) {
   6001 			snprintf(txq->txq_txseg_evcnt_names[j],
   6002 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6003 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6004 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6005 		}
   6006 
   6007 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6008 
   6009 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6010 #endif /* WM_EVENT_COUNTERS */
   6011 
   6012 		tx_done++;
   6013 	}
   6014 	if (error)
   6015 		goto fail_1;
   6016 
   6017 	/*
    6018 	 * For receive
   6019 	 */
   6020 	error = 0;
   6021 	rx_done = 0;
   6022 	for (i = 0; i < sc->sc_nqueues; i++) {
   6023 #ifdef WM_EVENT_COUNTERS
   6024 		const char *xname;
   6025 #endif
   6026 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6027 		rxq->rxq_sc = sc;
   6028 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6029 
   6030 		error = wm_alloc_rx_descs(sc, rxq);
   6031 		if (error)
   6032 			break;
   6033 
   6034 		error = wm_alloc_rx_buffer(sc, rxq);
   6035 		if (error) {
   6036 			wm_free_rx_descs(sc, rxq);
   6037 			break;
   6038 		}
   6039 
   6040 #ifdef WM_EVENT_COUNTERS
   6041 		xname = device_xname(sc->sc_dev);
   6042 
   6043 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6044 
   6045 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6046 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6047 #endif /* WM_EVENT_COUNTERS */
   6048 
   6049 		rx_done++;
   6050 	}
   6051 	if (error)
   6052 		goto fail_2;
   6053 
   6054 	return 0;
   6055 
   6056  fail_2:
   6057 	for (i = 0; i < rx_done; i++) {
   6058 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6059 		wm_free_rx_buffer(sc, rxq);
   6060 		wm_free_rx_descs(sc, rxq);
   6061 		if (rxq->rxq_lock)
   6062 			mutex_obj_free(rxq->rxq_lock);
   6063 	}
   6064  fail_1:
   6065 	for (i = 0; i < tx_done; i++) {
   6066 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6067 		pcq_destroy(txq->txq_interq);
   6068 		wm_free_tx_buffer(sc, txq);
   6069 		wm_free_tx_descs(sc, txq);
   6070 		if (txq->txq_lock)
   6071 			mutex_obj_free(txq->txq_lock);
   6072 	}
   6073 
   6074 	kmem_free(sc->sc_queue,
   6075 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6076  fail_0:
   6077 	return error;
   6078 }
   6079 
   6080 /*
    6081  * wm_free_txrx_queues:
   6082  *	Free {tx,rx}descs and {tx,rx} buffers
   6083  */
   6084 static void
   6085 wm_free_txrx_queues(struct wm_softc *sc)
   6086 {
   6087 	int i;
   6088 
   6089 	for (i = 0; i < sc->sc_nqueues; i++) {
   6090 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6091 
   6092 #ifdef WM_EVENT_COUNTERS
   6093 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6094 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6095 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6096 #endif /* WM_EVENT_COUNTERS */
   6097 
   6098 		wm_free_rx_buffer(sc, rxq);
   6099 		wm_free_rx_descs(sc, rxq);
   6100 		if (rxq->rxq_lock)
   6101 			mutex_obj_free(rxq->rxq_lock);
   6102 	}
   6103 
   6104 	for (i = 0; i < sc->sc_nqueues; i++) {
   6105 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6106 		struct mbuf *m;
   6107 #ifdef WM_EVENT_COUNTERS
   6108 		int j;
   6109 
   6110 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6111 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6112 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6113 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6114 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6115 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6116 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6117 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6118 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6119 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6120 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6121 
   6122 		for (j = 0; j < WM_NTXSEGS; j++)
   6123 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6124 
   6125 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6126 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6127 #endif /* WM_EVENT_COUNTERS */
   6128 
   6129 		/* drain txq_interq */
   6130 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6131 			m_freem(m);
   6132 		pcq_destroy(txq->txq_interq);
   6133 
   6134 		wm_free_tx_buffer(sc, txq);
   6135 		wm_free_tx_descs(sc, txq);
   6136 		if (txq->txq_lock)
   6137 			mutex_obj_free(txq->txq_lock);
   6138 	}
   6139 
   6140 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6141 }
   6142 
   6143 static void
   6144 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6145 {
   6146 
   6147 	KASSERT(mutex_owned(txq->txq_lock));
   6148 
   6149 	/* Initialize the transmit descriptor ring. */
   6150 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6151 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6152 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6153 	txq->txq_free = WM_NTXDESC(txq);
   6154 	txq->txq_next = 0;
   6155 }
   6156 
   6157 static void
   6158 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6159     struct wm_txqueue *txq)
   6160 {
   6161 
   6162 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6163 		device_xname(sc->sc_dev), __func__));
   6164 	KASSERT(mutex_owned(txq->txq_lock));
   6165 
   6166 	if (sc->sc_type < WM_T_82543) {
   6167 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6168 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6169 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6170 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6171 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6172 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6173 	} else {
   6174 		int qid = wmq->wmq_id;
   6175 
   6176 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6177 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6178 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6179 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6180 
   6181 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6182 			/*
   6183 			 * Don't write TDT before TCTL.EN is set.
    6184 			 * See the documentation.
   6185 			 */
   6186 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6187 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6188 			    | TXDCTL_WTHRESH(0));
   6189 		else {
   6190 			/* XXX should update with AIM? */
   6191 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6192 			if (sc->sc_type >= WM_T_82540) {
   6193 				/* should be same */
   6194 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6195 			}
   6196 
   6197 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6198 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6199 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6200 		}
   6201 	}
   6202 }
   6203 
   6204 static void
   6205 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6206 {
   6207 	int i;
   6208 
   6209 	KASSERT(mutex_owned(txq->txq_lock));
   6210 
   6211 	/* Initialize the transmit job descriptors. */
   6212 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6213 		txq->txq_soft[i].txs_mbuf = NULL;
   6214 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6215 	txq->txq_snext = 0;
   6216 	txq->txq_sdirty = 0;
   6217 }
   6218 
   6219 static void
   6220 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6221     struct wm_txqueue *txq)
   6222 {
   6223 
   6224 	KASSERT(mutex_owned(txq->txq_lock));
   6225 
   6226 	/*
   6227 	 * Set up some register offsets that are different between
   6228 	 * the i82542 and the i82543 and later chips.
   6229 	 */
   6230 	if (sc->sc_type < WM_T_82543)
   6231 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6232 	else
   6233 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6234 
   6235 	wm_init_tx_descs(sc, txq);
   6236 	wm_init_tx_regs(sc, wmq, txq);
   6237 	wm_init_tx_buffer(sc, txq);
   6238 }
   6239 
   6240 static void
   6241 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6242     struct wm_rxqueue *rxq)
   6243 {
   6244 
   6245 	KASSERT(mutex_owned(rxq->rxq_lock));
   6246 
   6247 	/*
   6248 	 * Initialize the receive descriptor and receive job
   6249 	 * descriptor rings.
   6250 	 */
   6251 	if (sc->sc_type < WM_T_82543) {
   6252 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6253 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6254 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6255 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6256 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6257 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6258 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6259 
   6260 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6261 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6262 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6263 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6264 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6265 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6266 	} else {
   6267 		int qid = wmq->wmq_id;
   6268 
   6269 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6270 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
    6271 		CSR_WRITE(sc, WMREG_RDLEN(qid),
         		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6272 
   6273 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6274 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    6275 				panic("%s: MCLBYTES %d unsupported for "
         				    "82575 or higher\n", __func__, MCLBYTES);
    6276 
    6277 			/*
         			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF
         			 * is supported.
         			 */
    6278 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
         			    SRRCTL_DESCTYPE_ADV_ONEBUF
         			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6280 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6281 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6282 			    | RXDCTL_WTHRESH(1));
   6283 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6284 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6285 		} else {
   6286 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6287 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6288 			/* XXX should update with AIM? */
   6289 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6290 			/* MUST be same */
   6291 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6292 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6293 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6294 		}
   6295 	}
   6296 }
   6297 
   6298 static int
   6299 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6300 {
   6301 	struct wm_rxsoft *rxs;
   6302 	int error, i;
   6303 
   6304 	KASSERT(mutex_owned(rxq->rxq_lock));
   6305 
   6306 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6307 		rxs = &rxq->rxq_soft[i];
   6308 		if (rxs->rxs_mbuf == NULL) {
   6309 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6310 				log(LOG_ERR, "%s: unable to allocate or map "
   6311 				    "rx buffer %d, error = %d\n",
   6312 				    device_xname(sc->sc_dev), i, error);
   6313 				/*
   6314 				 * XXX Should attempt to run with fewer receive
   6315 				 * XXX buffers instead of just failing.
   6316 				 */
   6317 				wm_rxdrain(rxq);
   6318 				return ENOMEM;
   6319 			}
   6320 		} else {
   6321 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6322 				wm_init_rxdesc(rxq, i);
    6323 			/*
    6324 			 * For 82575 and newer devices, the RX descriptors
    6325 			 * must be initialized after RCTL.EN is set in
    6326 			 * wm_set_filter().
    6327 			 */
   6328 		}
   6329 	}
   6330 	rxq->rxq_ptr = 0;
   6331 	rxq->rxq_discard = 0;
   6332 	WM_RXCHAIN_RESET(rxq);
   6333 
   6334 	return 0;
   6335 }
   6336 
   6337 static int
   6338 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6339     struct wm_rxqueue *rxq)
   6340 {
   6341 
   6342 	KASSERT(mutex_owned(rxq->rxq_lock));
   6343 
    6344 	/*
    6345 	 * Set up some register offsets that differ between
    6346 	 * the i82542 and the i82543 and later chips.
    6347 	 */
   6348 	if (sc->sc_type < WM_T_82543)
   6349 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6350 	else
   6351 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6352 
   6353 	wm_init_rx_regs(sc, wmq, rxq);
   6354 	return wm_init_rx_buffer(sc, rxq);
   6355 }
   6356 
    6357 /*
    6358  * wm_init_txrx_queues:
    6359  *	Initialize the {tx,rx} descriptors and {tx,rx} buffers.
    6360  */
   6361 static int
   6362 wm_init_txrx_queues(struct wm_softc *sc)
   6363 {
   6364 	int i, error = 0;
   6365 
   6366 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6367 		device_xname(sc->sc_dev), __func__));
   6368 
   6369 	for (i = 0; i < sc->sc_nqueues; i++) {
   6370 		struct wm_queue *wmq = &sc->sc_queue[i];
   6371 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6372 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6373 
    6374 		/*
    6375 		 * TODO
    6376 		 * Currently, we use a constant value instead of AIM.
    6377 		 * Furthermore, the interrupt interval of a multiqueue
    6378 		 * setup in polling mode is less than the default value.
    6379 		 * More tuning and AIM support are required.
    6380 		 */
   6381 		if (sc->sc_nqueues > 1)
   6382 			wmq->wmq_itr = 50;
   6383 		else
   6384 			wmq->wmq_itr = sc->sc_itr_init;
   6385 		wmq->wmq_set_itr = true;
   6386 
   6387 		mutex_enter(txq->txq_lock);
   6388 		wm_init_tx_queue(sc, wmq, txq);
   6389 		mutex_exit(txq->txq_lock);
   6390 
   6391 		mutex_enter(rxq->rxq_lock);
   6392 		error = wm_init_rx_queue(sc, wmq, rxq);
   6393 		mutex_exit(rxq->rxq_lock);
   6394 		if (error)
   6395 			break;
   6396 	}
   6397 
   6398 	return error;
   6399 }
   6400 
   6401 /*
   6402  * wm_tx_offload:
   6403  *
   6404  *	Set up TCP/IP checksumming parameters for the
   6405  *	specified packet.
   6406  */
   6407 static int
   6408 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6409     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6410 {
   6411 	struct mbuf *m0 = txs->txs_mbuf;
   6412 	struct livengood_tcpip_ctxdesc *t;
   6413 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6414 	uint32_t ipcse;
   6415 	struct ether_header *eh;
   6416 	int offset, iphl;
   6417 	uint8_t fields;
   6418 
   6419 	/*
   6420 	 * XXX It would be nice if the mbuf pkthdr had offset
   6421 	 * fields for the protocol headers.
   6422 	 */
   6423 
   6424 	eh = mtod(m0, struct ether_header *);
   6425 	switch (htons(eh->ether_type)) {
   6426 	case ETHERTYPE_IP:
   6427 	case ETHERTYPE_IPV6:
   6428 		offset = ETHER_HDR_LEN;
   6429 		break;
   6430 
   6431 	case ETHERTYPE_VLAN:
   6432 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6433 		break;
   6434 
   6435 	default:
   6436 		/*
   6437 		 * Don't support this protocol or encapsulation.
   6438 		 */
   6439 		*fieldsp = 0;
   6440 		*cmdp = 0;
   6441 		return 0;
   6442 	}
   6443 
   6444 	if ((m0->m_pkthdr.csum_flags &
   6445 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6446 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6447 	} else {
   6448 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6449 	}
   6450 	ipcse = offset + iphl - 1;
   6451 
   6452 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6453 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6454 	seg = 0;
   6455 	fields = 0;
   6456 
   6457 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6458 		int hlen = offset + iphl;
   6459 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6460 
   6461 		if (__predict_false(m0->m_len <
   6462 				    (hlen + sizeof(struct tcphdr)))) {
   6463 			/*
   6464 			 * TCP/IP headers are not in the first mbuf; we need
   6465 			 * to do this the slow and painful way.  Let's just
   6466 			 * hope this doesn't happen very often.
   6467 			 */
   6468 			struct tcphdr th;
   6469 
   6470 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6471 
   6472 			m_copydata(m0, hlen, sizeof(th), &th);
   6473 			if (v4) {
   6474 				struct ip ip;
   6475 
   6476 				m_copydata(m0, offset, sizeof(ip), &ip);
   6477 				ip.ip_len = 0;
   6478 				m_copyback(m0,
   6479 				    offset + offsetof(struct ip, ip_len),
   6480 				    sizeof(ip.ip_len), &ip.ip_len);
   6481 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6482 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6483 			} else {
   6484 				struct ip6_hdr ip6;
   6485 
   6486 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6487 				ip6.ip6_plen = 0;
   6488 				m_copyback(m0,
   6489 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6490 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6491 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6492 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6493 			}
   6494 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6495 			    sizeof(th.th_sum), &th.th_sum);
   6496 
   6497 			hlen += th.th_off << 2;
   6498 		} else {
   6499 			/*
   6500 			 * TCP/IP headers are in the first mbuf; we can do
   6501 			 * this the easy way.
   6502 			 */
   6503 			struct tcphdr *th;
   6504 
   6505 			if (v4) {
   6506 				struct ip *ip =
   6507 				    (void *)(mtod(m0, char *) + offset);
   6508 				th = (void *)(mtod(m0, char *) + hlen);
   6509 
   6510 				ip->ip_len = 0;
   6511 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6512 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6513 			} else {
   6514 				struct ip6_hdr *ip6 =
   6515 				    (void *)(mtod(m0, char *) + offset);
   6516 				th = (void *)(mtod(m0, char *) + hlen);
   6517 
   6518 				ip6->ip6_plen = 0;
   6519 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6520 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6521 			}
   6522 			hlen += th->th_off << 2;
   6523 		}
   6524 
   6525 		if (v4) {
   6526 			WM_Q_EVCNT_INCR(txq, txtso);
   6527 			cmdlen |= WTX_TCPIP_CMD_IP;
   6528 		} else {
   6529 			WM_Q_EVCNT_INCR(txq, txtso6);
   6530 			ipcse = 0;
   6531 		}
   6532 		cmd |= WTX_TCPIP_CMD_TSE;
   6533 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6534 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6535 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6536 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6537 	}
   6538 
   6539 	/*
   6540 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6541 	 * offload feature, if we load the context descriptor, we
   6542 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6543 	 */
   6544 
   6545 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6546 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6547 	    WTX_TCPIP_IPCSE(ipcse);
   6548 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6549 		WM_Q_EVCNT_INCR(txq, txipsum);
   6550 		fields |= WTX_IXSM;
   6551 	}
   6552 
   6553 	offset += iphl;
   6554 
   6555 	if (m0->m_pkthdr.csum_flags &
   6556 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6557 		WM_Q_EVCNT_INCR(txq, txtusum);
   6558 		fields |= WTX_TXSM;
   6559 		tucs = WTX_TCPIP_TUCSS(offset) |
   6560 		    WTX_TCPIP_TUCSO(offset +
   6561 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6562 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6563 	} else if ((m0->m_pkthdr.csum_flags &
   6564 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6565 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6566 		fields |= WTX_TXSM;
   6567 		tucs = WTX_TCPIP_TUCSS(offset) |
   6568 		    WTX_TCPIP_TUCSO(offset +
   6569 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6570 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6571 	} else {
   6572 		/* Just initialize it to a valid TCP context. */
   6573 		tucs = WTX_TCPIP_TUCSS(offset) |
   6574 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6575 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6576 	}
   6577 
   6578 	/* Fill in the context descriptor. */
   6579 	t = (struct livengood_tcpip_ctxdesc *)
   6580 	    &txq->txq_descs[txq->txq_next];
   6581 	t->tcpip_ipcs = htole32(ipcs);
   6582 	t->tcpip_tucs = htole32(tucs);
   6583 	t->tcpip_cmdlen = htole32(cmdlen);
   6584 	t->tcpip_seg = htole32(seg);
   6585 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6586 
   6587 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6588 	txs->txs_ndesc++;
   6589 
   6590 	*cmdp = cmd;
   6591 	*fieldsp = fields;
   6592 
   6593 	return 0;
   6594 }
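
         /*
          * Worked example of the context fields computed above (hypothetical,
          * untagged IPv4/TCP frame): offset = ETHER_HDR_LEN = 14 and iphl = 20,
          * so IPCSS = 14, IPCSO = 14 + offsetof(struct ip, ip_sum) = 24,
          * IPCSE = 14 + 20 - 1 = 33, and TUCSS = 34 with TUCSO pointing at
          * th_sum (34 + 16 = 50).
          */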
   6595 
   6596 static inline int
   6597 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6598 {
   6599 	struct wm_softc *sc = ifp->if_softc;
   6600 	u_int cpuid = cpu_index(curcpu());
   6601 
    6602 	/*
    6603 	 * Currently, a simple distribution strategy.
    6604 	 * TODO:
    6605 	 * Distribute by flowid (RSS hash value).
    6606 	 */
    6607 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6608 }
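
         /*
          * A worked example of the mapping above (hypothetical values): with
          * ncpu = 4, sc_affinity_offset = 2 and sc_nqueues = 2, CPUs 0..3 map
          * to queues (0 + 4 - 2) % 2 = 0, then 1, 0 and 1, so consecutive
          * CPUs alternate between the two queues.
          */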
   6609 
   6610 /*
   6611  * wm_start:		[ifnet interface function]
   6612  *
   6613  *	Start packet transmission on the interface.
   6614  */
   6615 static void
   6616 wm_start(struct ifnet *ifp)
   6617 {
   6618 	struct wm_softc *sc = ifp->if_softc;
   6619 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6620 
   6621 #ifdef WM_MPSAFE
   6622 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6623 #endif
   6624 	/*
   6625 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6626 	 */
   6627 
   6628 	mutex_enter(txq->txq_lock);
   6629 	if (!txq->txq_stopping)
   6630 		wm_start_locked(ifp);
   6631 	mutex_exit(txq->txq_lock);
   6632 }
   6633 
   6634 static void
   6635 wm_start_locked(struct ifnet *ifp)
   6636 {
   6637 	struct wm_softc *sc = ifp->if_softc;
   6638 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6639 
   6640 	wm_send_common_locked(ifp, txq, false);
   6641 }
   6642 
   6643 static int
   6644 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6645 {
   6646 	int qid;
   6647 	struct wm_softc *sc = ifp->if_softc;
   6648 	struct wm_txqueue *txq;
   6649 
   6650 	qid = wm_select_txqueue(ifp, m);
   6651 	txq = &sc->sc_queue[qid].wmq_txq;
   6652 
   6653 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6654 		m_freem(m);
   6655 		WM_Q_EVCNT_INCR(txq, txdrop);
   6656 		return ENOBUFS;
   6657 	}
   6658 
   6659 	/*
   6660 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6661 	 */
   6662 	ifp->if_obytes += m->m_pkthdr.len;
   6663 	if (m->m_flags & M_MCAST)
   6664 		ifp->if_omcasts++;
   6665 
   6666 	if (mutex_tryenter(txq->txq_lock)) {
   6667 		if (!txq->txq_stopping)
   6668 			wm_transmit_locked(ifp, txq);
   6669 		mutex_exit(txq->txq_lock);
   6670 	}
   6671 
   6672 	return 0;
   6673 }
   6674 
   6675 static void
   6676 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6677 {
   6678 
   6679 	wm_send_common_locked(ifp, txq, true);
   6680 }
   6681 
   6682 static void
   6683 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6684     bool is_transmit)
   6685 {
   6686 	struct wm_softc *sc = ifp->if_softc;
   6687 	struct mbuf *m0;
   6688 	struct m_tag *mtag;
   6689 	struct wm_txsoft *txs;
   6690 	bus_dmamap_t dmamap;
   6691 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6692 	bus_addr_t curaddr;
   6693 	bus_size_t seglen, curlen;
   6694 	uint32_t cksumcmd;
   6695 	uint8_t cksumfields;
   6696 
   6697 	KASSERT(mutex_owned(txq->txq_lock));
   6698 
   6699 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6700 		return;
   6701 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6702 		return;
   6703 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6704 		return;
   6705 
   6706 	/* Remember the previous number of free descriptors. */
   6707 	ofree = txq->txq_free;
   6708 
   6709 	/*
   6710 	 * Loop through the send queue, setting up transmit descriptors
   6711 	 * until we drain the queue, or use up all available transmit
   6712 	 * descriptors.
   6713 	 */
   6714 	for (;;) {
   6715 		m0 = NULL;
   6716 
   6717 		/* Get a work queue entry. */
   6718 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6719 			wm_txeof(sc, txq);
   6720 			if (txq->txq_sfree == 0) {
   6721 				DPRINTF(WM_DEBUG_TX,
   6722 				    ("%s: TX: no free job descriptors\n",
   6723 					device_xname(sc->sc_dev)));
   6724 				WM_Q_EVCNT_INCR(txq, txsstall);
   6725 				break;
   6726 			}
   6727 		}
   6728 
   6729 		/* Grab a packet off the queue. */
   6730 		if (is_transmit)
   6731 			m0 = pcq_get(txq->txq_interq);
   6732 		else
   6733 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6734 		if (m0 == NULL)
   6735 			break;
   6736 
   6737 		DPRINTF(WM_DEBUG_TX,
   6738 		    ("%s: TX: have packet to transmit: %p\n",
   6739 		    device_xname(sc->sc_dev), m0));
   6740 
   6741 		txs = &txq->txq_soft[txq->txq_snext];
   6742 		dmamap = txs->txs_dmamap;
   6743 
   6744 		use_tso = (m0->m_pkthdr.csum_flags &
   6745 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6746 
   6747 		/*
   6748 		 * So says the Linux driver:
   6749 		 * The controller does a simple calculation to make sure
   6750 		 * there is enough room in the FIFO before initiating the
   6751 		 * DMA for each buffer.  The calc is:
   6752 		 *	4 = ceil(buffer len / MSS)
   6753 		 * To make sure we don't overrun the FIFO, adjust the max
   6754 		 * buffer len if the MSS drops.
   6755 		 */
   6756 		dmamap->dm_maxsegsz =
   6757 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6758 		    ? m0->m_pkthdr.segsz << 2
   6759 		    : WTX_MAX_LEN;
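
         		/*
         		 * Worked example of the clamp above (hypothetical MSS):
         		 * with segsz = 1448, and assuming WTX_MAX_LEN exceeds
         		 * 1448 << 2, dm_maxsegsz becomes 5792 bytes, keeping
         		 * ceil(buffer len / MSS) <= 4 for every DMA segment.
         		 */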
   6760 
   6761 		/*
   6762 		 * Load the DMA map.  If this fails, the packet either
   6763 		 * didn't fit in the allotted number of segments, or we
   6764 		 * were short on resources.  For the too-many-segments
   6765 		 * case, we simply report an error and drop the packet,
   6766 		 * since we can't sanely copy a jumbo packet to a single
   6767 		 * buffer.
   6768 		 */
   6769 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6770 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6771 		if (error) {
   6772 			if (error == EFBIG) {
   6773 				WM_Q_EVCNT_INCR(txq, txdrop);
   6774 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6775 				    "DMA segments, dropping...\n",
   6776 				    device_xname(sc->sc_dev));
   6777 				wm_dump_mbuf_chain(sc, m0);
   6778 				m_freem(m0);
   6779 				continue;
   6780 			}
    6781 			/* Short on resources, just stop for now. */
   6782 			DPRINTF(WM_DEBUG_TX,
   6783 			    ("%s: TX: dmamap load failed: %d\n",
   6784 			    device_xname(sc->sc_dev), error));
   6785 			break;
   6786 		}
   6787 
   6788 		segs_needed = dmamap->dm_nsegs;
   6789 		if (use_tso) {
   6790 			/* For sentinel descriptor; see below. */
   6791 			segs_needed++;
   6792 		}
   6793 
   6794 		/*
   6795 		 * Ensure we have enough descriptors free to describe
   6796 		 * the packet.  Note, we always reserve one descriptor
   6797 		 * at the end of the ring due to the semantics of the
   6798 		 * TDT register, plus one more in the event we need
   6799 		 * to load offload context.
   6800 		 */
   6801 		if (segs_needed > txq->txq_free - 2) {
   6802 			/*
   6803 			 * Not enough free descriptors to transmit this
   6804 			 * packet.  We haven't committed anything yet,
   6805 			 * so just unload the DMA map, put the packet
    6806 			 * back on the queue, and punt.  Notify the upper
   6807 			 * layer that there are no more slots left.
   6808 			 */
   6809 			DPRINTF(WM_DEBUG_TX,
   6810 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6811 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6812 			    segs_needed, txq->txq_free - 1));
   6813 			if (!is_transmit)
   6814 				ifp->if_flags |= IFF_OACTIVE;
   6815 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6816 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6817 			WM_Q_EVCNT_INCR(txq, txdstall);
   6818 			break;
   6819 		}
   6820 
   6821 		/*
   6822 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6823 		 * once we know we can transmit the packet, since we
   6824 		 * do some internal FIFO space accounting here.
   6825 		 */
   6826 		if (sc->sc_type == WM_T_82547 &&
   6827 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6828 			DPRINTF(WM_DEBUG_TX,
   6829 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6830 			    device_xname(sc->sc_dev)));
   6831 			if (!is_transmit)
   6832 				ifp->if_flags |= IFF_OACTIVE;
   6833 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6834 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6835 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6836 			break;
   6837 		}
   6838 
   6839 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6840 
   6841 		DPRINTF(WM_DEBUG_TX,
   6842 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6843 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6844 
   6845 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6846 
   6847 		/*
   6848 		 * Store a pointer to the packet so that we can free it
   6849 		 * later.
   6850 		 *
   6851 		 * Initially, we consider the number of descriptors the
   6852 		 * packet uses the number of DMA segments.  This may be
   6853 		 * incremented by 1 if we do checksum offload (a descriptor
   6854 		 * is used to set the checksum context).
   6855 		 */
   6856 		txs->txs_mbuf = m0;
   6857 		txs->txs_firstdesc = txq->txq_next;
   6858 		txs->txs_ndesc = segs_needed;
   6859 
   6860 		/* Set up offload parameters for this packet. */
   6861 		if (m0->m_pkthdr.csum_flags &
   6862 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6863 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6864 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6865 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   6866 					  &cksumfields) != 0) {
   6867 				/* Error message already displayed. */
   6868 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6869 				continue;
   6870 			}
   6871 		} else {
   6872 			cksumcmd = 0;
   6873 			cksumfields = 0;
   6874 		}
   6875 
   6876 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6877 
   6878 		/* Sync the DMA map. */
   6879 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6880 		    BUS_DMASYNC_PREWRITE);
   6881 
   6882 		/* Initialize the transmit descriptor. */
   6883 		for (nexttx = txq->txq_next, seg = 0;
   6884 		     seg < dmamap->dm_nsegs; seg++) {
   6885 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6886 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6887 			     seglen != 0;
   6888 			     curaddr += curlen, seglen -= curlen,
   6889 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6890 				curlen = seglen;
   6891 
    6892 				/*
    6893 				 * So says the Linux driver: work around
    6894 				 * premature descriptor write-backs in TSO
    6895 				 * mode.  Shorten this segment by 4 bytes
    6896 				 * so the loop emits a trailing 4-byte
    6897 				 * sentinel descriptor.
         				 */
   6898 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6899 				    curlen > 8)
   6900 					curlen -= 4;
   6901 
   6902 				wm_set_dma_addr(
   6903 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6904 				txq->txq_descs[nexttx].wtx_cmdlen
   6905 				    = htole32(cksumcmd | curlen);
   6906 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6907 				    = 0;
   6908 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6909 				    = cksumfields;
    6910 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   6911 				lasttx = nexttx;
   6912 
   6913 				DPRINTF(WM_DEBUG_TX,
   6914 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6915 				     "len %#04zx\n",
   6916 				    device_xname(sc->sc_dev), nexttx,
   6917 				    (uint64_t)curaddr, curlen));
   6918 			}
   6919 		}
   6920 
   6921 		KASSERT(lasttx != -1);
   6922 
   6923 		/*
   6924 		 * Set up the command byte on the last descriptor of
   6925 		 * the packet.  If we're in the interrupt delay window,
   6926 		 * delay the interrupt.
   6927 		 */
   6928 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6929 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6930 
   6931 		/*
   6932 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6933 		 * up the descriptor to encapsulate the packet for us.
   6934 		 *
   6935 		 * This is only valid on the last descriptor of the packet.
   6936 		 */
   6937 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6938 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6939 			    htole32(WTX_CMD_VLE);
   6940 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6941 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6942 		}
   6943 
   6944 		txs->txs_lastdesc = lasttx;
   6945 
   6946 		DPRINTF(WM_DEBUG_TX,
   6947 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6948 		    device_xname(sc->sc_dev),
   6949 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6950 
   6951 		/* Sync the descriptors we're using. */
   6952 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6953 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6954 
   6955 		/* Give the packet to the chip. */
   6956 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6957 
   6958 		DPRINTF(WM_DEBUG_TX,
   6959 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6960 
   6961 		DPRINTF(WM_DEBUG_TX,
   6962 		    ("%s: TX: finished transmitting packet, job %d\n",
   6963 		    device_xname(sc->sc_dev), txq->txq_snext));
   6964 
   6965 		/* Advance the tx pointer. */
   6966 		txq->txq_free -= txs->txs_ndesc;
   6967 		txq->txq_next = nexttx;
   6968 
   6969 		txq->txq_sfree--;
   6970 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6971 
   6972 		/* Pass the packet to any BPF listeners. */
   6973 		bpf_mtap(ifp, m0);
   6974 	}
   6975 
   6976 	if (m0 != NULL) {
   6977 		if (!is_transmit)
   6978 			ifp->if_flags |= IFF_OACTIVE;
   6979 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6980 		WM_Q_EVCNT_INCR(txq, txdrop);
   6981 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6982 			__func__));
   6983 		m_freem(m0);
   6984 	}
   6985 
   6986 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6987 		/* No more slots; notify upper layer. */
   6988 		if (!is_transmit)
   6989 			ifp->if_flags |= IFF_OACTIVE;
   6990 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6991 	}
   6992 
   6993 	if (txq->txq_free != ofree) {
   6994 		/* Set a watchdog timer in case the chip flakes out. */
   6995 		ifp->if_timer = 5;
   6996 	}
   6997 }
   6998 
   6999 /*
   7000  * wm_nq_tx_offload:
   7001  *
   7002  *	Set up TCP/IP checksumming parameters for the
   7003  *	specified packet, for NEWQUEUE devices
   7004  */
   7005 static int
   7006 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7007     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7008 {
   7009 	struct mbuf *m0 = txs->txs_mbuf;
   7010 	struct m_tag *mtag;
   7011 	uint32_t vl_len, mssidx, cmdc;
   7012 	struct ether_header *eh;
   7013 	int offset, iphl;
   7014 
   7015 	/*
   7016 	 * XXX It would be nice if the mbuf pkthdr had offset
   7017 	 * fields for the protocol headers.
   7018 	 */
   7019 	*cmdlenp = 0;
   7020 	*fieldsp = 0;
   7021 
   7022 	eh = mtod(m0, struct ether_header *);
   7023 	switch (htons(eh->ether_type)) {
   7024 	case ETHERTYPE_IP:
   7025 	case ETHERTYPE_IPV6:
   7026 		offset = ETHER_HDR_LEN;
   7027 		break;
   7028 
   7029 	case ETHERTYPE_VLAN:
   7030 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7031 		break;
   7032 
   7033 	default:
   7034 		/* Don't support this protocol or encapsulation. */
   7035 		*do_csum = false;
   7036 		return 0;
   7037 	}
   7038 	*do_csum = true;
   7039 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7040 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7041 
   7042 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7043 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7044 
   7045 	if ((m0->m_pkthdr.csum_flags &
   7046 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7047 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7048 	} else {
   7049 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7050 	}
   7051 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7052 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7053 
   7054 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7055 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7056 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7057 		*cmdlenp |= NQTX_CMD_VLE;
   7058 	}
   7059 
   7060 	mssidx = 0;
   7061 
   7062 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7063 		int hlen = offset + iphl;
   7064 		int tcp_hlen;
   7065 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7066 
   7067 		if (__predict_false(m0->m_len <
   7068 				    (hlen + sizeof(struct tcphdr)))) {
   7069 			/*
   7070 			 * TCP/IP headers are not in the first mbuf; we need
   7071 			 * to do this the slow and painful way.  Let's just
   7072 			 * hope this doesn't happen very often.
   7073 			 */
   7074 			struct tcphdr th;
   7075 
   7076 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7077 
   7078 			m_copydata(m0, hlen, sizeof(th), &th);
   7079 			if (v4) {
   7080 				struct ip ip;
   7081 
   7082 				m_copydata(m0, offset, sizeof(ip), &ip);
   7083 				ip.ip_len = 0;
   7084 				m_copyback(m0,
   7085 				    offset + offsetof(struct ip, ip_len),
   7086 				    sizeof(ip.ip_len), &ip.ip_len);
   7087 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7088 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7089 			} else {
   7090 				struct ip6_hdr ip6;
   7091 
   7092 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7093 				ip6.ip6_plen = 0;
   7094 				m_copyback(m0,
   7095 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7096 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7097 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7098 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7099 			}
   7100 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7101 			    sizeof(th.th_sum), &th.th_sum);
   7102 
   7103 			tcp_hlen = th.th_off << 2;
   7104 		} else {
   7105 			/*
   7106 			 * TCP/IP headers are in the first mbuf; we can do
   7107 			 * this the easy way.
   7108 			 */
   7109 			struct tcphdr *th;
   7110 
   7111 			if (v4) {
   7112 				struct ip *ip =
   7113 				    (void *)(mtod(m0, char *) + offset);
   7114 				th = (void *)(mtod(m0, char *) + hlen);
   7115 
   7116 				ip->ip_len = 0;
   7117 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7118 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7119 			} else {
   7120 				struct ip6_hdr *ip6 =
   7121 				    (void *)(mtod(m0, char *) + offset);
   7122 				th = (void *)(mtod(m0, char *) + hlen);
   7123 
   7124 				ip6->ip6_plen = 0;
   7125 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7126 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7127 			}
   7128 			tcp_hlen = th->th_off << 2;
   7129 		}
   7130 		hlen += tcp_hlen;
   7131 		*cmdlenp |= NQTX_CMD_TSE;
   7132 
   7133 		if (v4) {
   7134 			WM_Q_EVCNT_INCR(txq, txtso);
   7135 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7136 		} else {
   7137 			WM_Q_EVCNT_INCR(txq, txtso6);
   7138 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7139 		}
    7140 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    7141 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7142 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7143 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7144 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7145 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7146 	} else {
   7147 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7148 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7149 	}
   7150 
   7151 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7152 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7153 		cmdc |= NQTXC_CMD_IP4;
   7154 	}
   7155 
   7156 	if (m0->m_pkthdr.csum_flags &
   7157 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7158 		WM_Q_EVCNT_INCR(txq, txtusum);
   7159 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7160 			cmdc |= NQTXC_CMD_TCP;
   7161 		} else {
   7162 			cmdc |= NQTXC_CMD_UDP;
   7163 		}
   7164 		cmdc |= NQTXC_CMD_IP4;
   7165 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7166 	}
   7167 	if (m0->m_pkthdr.csum_flags &
   7168 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7169 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7170 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7171 			cmdc |= NQTXC_CMD_TCP;
   7172 		} else {
   7173 			cmdc |= NQTXC_CMD_UDP;
   7174 		}
   7175 		cmdc |= NQTXC_CMD_IP6;
   7176 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7177 	}
   7178 
   7179 	/* Fill in the context descriptor. */
   7180 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7181 	    htole32(vl_len);
   7182 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7183 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7184 	    htole32(cmdc);
   7185 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7186 	    htole32(mssidx);
   7187 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7188 	DPRINTF(WM_DEBUG_TX,
   7189 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7190 	    txq->txq_next, 0, vl_len));
   7191 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7192 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7193 	txs->txs_ndesc++;
   7194 	return 0;
   7195 }
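
         /*
          * A minimal sketch (illustrative only) of how the context
          * descriptor's vl_len word built above packs its three fields,
          * reusing the driver's shift/mask macros:
          */
         #if 0
         static inline uint32_t
         example_nqtxc_vl_len(int maclen, int iplen, uint16_t vlan)
         {
         	/* MAC header length, IP header length and the VLAN tag. */
         	return (maclen << NQTXC_VLLEN_MACLEN_SHIFT) |
         	    (iplen << NQTXC_VLLEN_IPLEN_SHIFT) |
         	    ((vlan & NQTXC_VLLEN_VLAN_MASK) << NQTXC_VLLEN_VLAN_SHIFT);
         }
         #endif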
   7196 
   7197 /*
   7198  * wm_nq_start:		[ifnet interface function]
   7199  *
   7200  *	Start packet transmission on the interface for NEWQUEUE devices
   7201  */
   7202 static void
   7203 wm_nq_start(struct ifnet *ifp)
   7204 {
   7205 	struct wm_softc *sc = ifp->if_softc;
   7206 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7207 
   7208 #ifdef WM_MPSAFE
   7209 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7210 #endif
   7211 	/*
   7212 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7213 	 */
   7214 
   7215 	mutex_enter(txq->txq_lock);
   7216 	if (!txq->txq_stopping)
   7217 		wm_nq_start_locked(ifp);
   7218 	mutex_exit(txq->txq_lock);
   7219 }
   7220 
   7221 static void
   7222 wm_nq_start_locked(struct ifnet *ifp)
   7223 {
   7224 	struct wm_softc *sc = ifp->if_softc;
   7225 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7226 
   7227 	wm_nq_send_common_locked(ifp, txq, false);
   7228 }
   7229 
   7230 static int
   7231 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7232 {
   7233 	int qid;
   7234 	struct wm_softc *sc = ifp->if_softc;
   7235 	struct wm_txqueue *txq;
   7236 
   7237 	qid = wm_select_txqueue(ifp, m);
   7238 	txq = &sc->sc_queue[qid].wmq_txq;
   7239 
   7240 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7241 		m_freem(m);
   7242 		WM_Q_EVCNT_INCR(txq, txdrop);
   7243 		return ENOBUFS;
   7244 	}
   7245 
   7246 	/*
   7247 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7248 	 */
   7249 	ifp->if_obytes += m->m_pkthdr.len;
   7250 	if (m->m_flags & M_MCAST)
   7251 		ifp->if_omcasts++;
   7252 
    7253 	/*
    7254 	 * There are two situations in which this mutex_tryenter() can
    7255 	 * fail at run time:
    7256 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7257 	 *     (2) contention with the deferred if_start softint
    7258 	 *         (wm_handle_queue())
    7259 	 * In either case, the last packet enqueued to txq->txq_interq is
    7260 	 * later dequeued by wm_deferred_start_locked(), so the packet
    7261 	 * does not get stuck in the queue.
    7262 	 */
   7263 	if (mutex_tryenter(txq->txq_lock)) {
   7264 		if (!txq->txq_stopping)
   7265 			wm_nq_transmit_locked(ifp, txq);
   7266 		mutex_exit(txq->txq_lock);
   7267 	}
   7268 
   7269 	return 0;
   7270 }
   7271 
   7272 static void
   7273 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7274 {
   7275 
   7276 	wm_nq_send_common_locked(ifp, txq, true);
   7277 }
   7278 
   7279 static void
   7280 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7281     bool is_transmit)
   7282 {
   7283 	struct wm_softc *sc = ifp->if_softc;
   7284 	struct mbuf *m0;
   7285 	struct m_tag *mtag;
   7286 	struct wm_txsoft *txs;
   7287 	bus_dmamap_t dmamap;
   7288 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7289 	bool do_csum, sent;
   7290 
   7291 	KASSERT(mutex_owned(txq->txq_lock));
   7292 
   7293 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7294 		return;
   7295 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7296 		return;
   7297 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7298 		return;
   7299 
   7300 	sent = false;
   7301 
   7302 	/*
   7303 	 * Loop through the send queue, setting up transmit descriptors
   7304 	 * until we drain the queue, or use up all available transmit
   7305 	 * descriptors.
   7306 	 */
   7307 	for (;;) {
   7308 		m0 = NULL;
   7309 
   7310 		/* Get a work queue entry. */
   7311 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7312 			wm_txeof(sc, txq);
   7313 			if (txq->txq_sfree == 0) {
   7314 				DPRINTF(WM_DEBUG_TX,
   7315 				    ("%s: TX: no free job descriptors\n",
   7316 					device_xname(sc->sc_dev)));
   7317 				WM_Q_EVCNT_INCR(txq, txsstall);
   7318 				break;
   7319 			}
   7320 		}
   7321 
   7322 		/* Grab a packet off the queue. */
   7323 		if (is_transmit)
   7324 			m0 = pcq_get(txq->txq_interq);
   7325 		else
   7326 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7327 		if (m0 == NULL)
   7328 			break;
   7329 
   7330 		DPRINTF(WM_DEBUG_TX,
   7331 		    ("%s: TX: have packet to transmit: %p\n",
   7332 		    device_xname(sc->sc_dev), m0));
   7333 
   7334 		txs = &txq->txq_soft[txq->txq_snext];
   7335 		dmamap = txs->txs_dmamap;
   7336 
   7337 		/*
   7338 		 * Load the DMA map.  If this fails, the packet either
   7339 		 * didn't fit in the allotted number of segments, or we
   7340 		 * were short on resources.  For the too-many-segments
   7341 		 * case, we simply report an error and drop the packet,
   7342 		 * since we can't sanely copy a jumbo packet to a single
   7343 		 * buffer.
   7344 		 */
   7345 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7346 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7347 		if (error) {
   7348 			if (error == EFBIG) {
   7349 				WM_Q_EVCNT_INCR(txq, txdrop);
   7350 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7351 				    "DMA segments, dropping...\n",
   7352 				    device_xname(sc->sc_dev));
   7353 				wm_dump_mbuf_chain(sc, m0);
   7354 				m_freem(m0);
   7355 				continue;
   7356 			}
   7357 			/* Short on resources, just stop for now. */
   7358 			DPRINTF(WM_DEBUG_TX,
   7359 			    ("%s: TX: dmamap load failed: %d\n",
   7360 			    device_xname(sc->sc_dev), error));
   7361 			break;
   7362 		}
   7363 
   7364 		segs_needed = dmamap->dm_nsegs;
   7365 
   7366 		/*
   7367 		 * Ensure we have enough descriptors free to describe
   7368 		 * the packet.  Note, we always reserve one descriptor
   7369 		 * at the end of the ring due to the semantics of the
   7370 		 * TDT register, plus one more in the event we need
   7371 		 * to load offload context.
   7372 		 */
   7373 		if (segs_needed > txq->txq_free - 2) {
   7374 			/*
   7375 			 * Not enough free descriptors to transmit this
   7376 			 * packet.  We haven't committed anything yet,
   7377 			 * so just unload the DMA map, put the packet
    7378 			 * back on the queue, and punt.  Notify the upper
   7379 			 * layer that there are no more slots left.
   7380 			 */
   7381 			DPRINTF(WM_DEBUG_TX,
   7382 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7383 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7384 			    segs_needed, txq->txq_free - 1));
   7385 			if (!is_transmit)
   7386 				ifp->if_flags |= IFF_OACTIVE;
   7387 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7388 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7389 			WM_Q_EVCNT_INCR(txq, txdstall);
   7390 			break;
   7391 		}
   7392 
   7393 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7394 
   7395 		DPRINTF(WM_DEBUG_TX,
   7396 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7397 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7398 
   7399 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7400 
   7401 		/*
   7402 		 * Store a pointer to the packet so that we can free it
   7403 		 * later.
   7404 		 *
   7405 		 * Initially, we consider the number of descriptors the
   7406 		 * packet uses the number of DMA segments.  This may be
   7407 		 * incremented by 1 if we do checksum offload (a descriptor
   7408 		 * is used to set the checksum context).
   7409 		 */
   7410 		txs->txs_mbuf = m0;
   7411 		txs->txs_firstdesc = txq->txq_next;
   7412 		txs->txs_ndesc = segs_needed;
   7413 
   7414 		/* Set up offload parameters for this packet. */
   7415 		uint32_t cmdlen, fields, dcmdlen;
   7416 		if (m0->m_pkthdr.csum_flags &
   7417 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7418 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7419 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7420 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7421 			    &do_csum) != 0) {
   7422 				/* Error message already displayed. */
   7423 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7424 				continue;
   7425 			}
   7426 		} else {
   7427 			do_csum = false;
   7428 			cmdlen = 0;
   7429 			fields = 0;
   7430 		}
   7431 
   7432 		/* Sync the DMA map. */
   7433 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7434 		    BUS_DMASYNC_PREWRITE);
   7435 
   7436 		/* Initialize the first transmit descriptor. */
   7437 		nexttx = txq->txq_next;
   7438 		if (!do_csum) {
   7439 			/* setup a legacy descriptor */
   7440 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7441 			    dmamap->dm_segs[0].ds_addr);
   7442 			txq->txq_descs[nexttx].wtx_cmdlen =
   7443 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7444 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7445 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7446 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7447 			    NULL) {
   7448 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7449 				    htole32(WTX_CMD_VLE);
   7450 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7451 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7452 			} else {
    7453 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   7454 			}
   7455 			dcmdlen = 0;
   7456 		} else {
   7457 			/* setup an advanced data descriptor */
   7458 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7459 			    htole64(dmamap->dm_segs[0].ds_addr);
   7460 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7461 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7462 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7463 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7464 			    htole32(fields);
   7465 			DPRINTF(WM_DEBUG_TX,
   7466 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7467 			    device_xname(sc->sc_dev), nexttx,
   7468 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7469 			DPRINTF(WM_DEBUG_TX,
   7470 			    ("\t 0x%08x%08x\n", fields,
   7471 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7472 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7473 		}
   7474 
   7475 		lasttx = nexttx;
   7476 		nexttx = WM_NEXTTX(txq, nexttx);
    7477 		/*
    7478 		 * Fill in the next descriptors.  The legacy and advanced
    7479 		 * formats are the same here.
    7480 		 */
   7481 		for (seg = 1; seg < dmamap->dm_nsegs;
   7482 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7483 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7484 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7485 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7486 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7487 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7488 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7489 			lasttx = nexttx;
   7490 
   7491 			DPRINTF(WM_DEBUG_TX,
   7492 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7493 			     "len %#04zx\n",
   7494 			    device_xname(sc->sc_dev), nexttx,
   7495 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7496 			    dmamap->dm_segs[seg].ds_len));
   7497 		}
   7498 
   7499 		KASSERT(lasttx != -1);
   7500 
   7501 		/*
   7502 		 * Set up the command byte on the last descriptor of
   7503 		 * the packet.  If we're in the interrupt delay window,
   7504 		 * delay the interrupt.
   7505 		 */
   7506 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7507 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7508 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7509 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7510 
   7511 		txs->txs_lastdesc = lasttx;
   7512 
   7513 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7514 		    device_xname(sc->sc_dev),
   7515 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7516 
   7517 		/* Sync the descriptors we're using. */
   7518 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7519 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7520 
   7521 		/* Give the packet to the chip. */
   7522 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7523 		sent = true;
   7524 
   7525 		DPRINTF(WM_DEBUG_TX,
   7526 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7527 
   7528 		DPRINTF(WM_DEBUG_TX,
   7529 		    ("%s: TX: finished transmitting packet, job %d\n",
   7530 		    device_xname(sc->sc_dev), txq->txq_snext));
   7531 
   7532 		/* Advance the tx pointer. */
   7533 		txq->txq_free -= txs->txs_ndesc;
   7534 		txq->txq_next = nexttx;
   7535 
   7536 		txq->txq_sfree--;
   7537 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7538 
   7539 		/* Pass the packet to any BPF listeners. */
   7540 		bpf_mtap(ifp, m0);
   7541 	}
   7542 
   7543 	if (m0 != NULL) {
   7544 		if (!is_transmit)
   7545 			ifp->if_flags |= IFF_OACTIVE;
   7546 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7547 		WM_Q_EVCNT_INCR(txq, txdrop);
   7548 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7549 			__func__));
   7550 		m_freem(m0);
   7551 	}
   7552 
   7553 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7554 		/* No more slots; notify upper layer. */
   7555 		if (!is_transmit)
   7556 			ifp->if_flags |= IFF_OACTIVE;
   7557 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7558 	}
   7559 
   7560 	if (sent) {
   7561 		/* Set a watchdog timer in case the chip flakes out. */
   7562 		ifp->if_timer = 5;
   7563 	}
   7564 }
   7565 
   7566 static void
   7567 wm_deferred_start_locked(struct wm_txqueue *txq)
   7568 {
   7569 	struct wm_softc *sc = txq->txq_sc;
   7570 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7571 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7572 	int qid = wmq->wmq_id;
   7573 
   7574 	KASSERT(mutex_owned(txq->txq_lock));
   7575 
   7576 	if (txq->txq_stopping) {
   7577 		mutex_exit(txq->txq_lock);
   7578 		return;
   7579 	}
   7580 
   7581 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7582 		/* XXX need for ALTQ */
    7583 		/* XXX needed for ALTQ */
   7584 			wm_nq_start_locked(ifp);
   7585 		wm_nq_transmit_locked(ifp, txq);
   7586 	} else {
   7587 		/* XXX need for ALTQ */
    7588 		/* XXX needed for ALTQ */
   7589 			wm_start_locked(ifp);
   7590 		wm_transmit_locked(ifp, txq);
   7591 	}
   7592 }
   7593 
   7594 /* Interrupt */
   7595 
   7596 /*
   7597  * wm_txeof:
   7598  *
   7599  *	Helper; handle transmit interrupts.
   7600  */
   7601 static int
   7602 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7603 {
   7604 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7605 	struct wm_txsoft *txs;
   7606 	bool processed = false;
   7607 	int count = 0;
   7608 	int i;
   7609 	uint8_t status;
   7610 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7611 
   7612 	KASSERT(mutex_owned(txq->txq_lock));
   7613 
   7614 	if (txq->txq_stopping)
   7615 		return 0;
   7616 
   7617 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7618 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7619 	if (wmq->wmq_id == 0)
   7620 		ifp->if_flags &= ~IFF_OACTIVE;
   7621 
   7622 	/*
   7623 	 * Go through the Tx list and free mbufs for those
   7624 	 * frames which have been transmitted.
   7625 	 */
   7626 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7627 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7628 		txs = &txq->txq_soft[i];
   7629 
   7630 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7631 			device_xname(sc->sc_dev), i));
   7632 
   7633 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7634 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7635 
   7636 		status =
   7637 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7638 		if ((status & WTX_ST_DD) == 0) {
   7639 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7640 			    BUS_DMASYNC_PREREAD);
   7641 			break;
   7642 		}
   7643 
   7644 		processed = true;
   7645 		count++;
   7646 		DPRINTF(WM_DEBUG_TX,
   7647 		    ("%s: TX: job %d done: descs %d..%d\n",
   7648 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7649 		    txs->txs_lastdesc));
   7650 
   7651 		/*
   7652 		 * XXX We should probably be using the statistics
   7653 		 * XXX registers, but I don't know if they exist
   7654 		 * XXX on chips before the i82544.
   7655 		 */
   7656 
   7657 #ifdef WM_EVENT_COUNTERS
   7658 		if (status & WTX_ST_TU)
   7659 			WM_Q_EVCNT_INCR(txq, tu);
   7660 #endif /* WM_EVENT_COUNTERS */
   7661 
   7662 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7663 			ifp->if_oerrors++;
   7664 			if (status & WTX_ST_LC)
   7665 				log(LOG_WARNING, "%s: late collision\n",
   7666 				    device_xname(sc->sc_dev));
   7667 			else if (status & WTX_ST_EC) {
   7668 				ifp->if_collisions += 16;
   7669 				log(LOG_WARNING, "%s: excessive collisions\n",
   7670 				    device_xname(sc->sc_dev));
   7671 			}
   7672 		} else
   7673 			ifp->if_opackets++;
   7674 
   7675 		txq->txq_packets++;
   7676 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7677 
   7678 		txq->txq_free += txs->txs_ndesc;
   7679 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7680 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7681 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7682 		m_freem(txs->txs_mbuf);
   7683 		txs->txs_mbuf = NULL;
   7684 	}
   7685 
   7686 	/* Update the dirty transmit buffer pointer. */
   7687 	txq->txq_sdirty = i;
   7688 	DPRINTF(WM_DEBUG_TX,
   7689 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7690 
   7691 	if (count != 0)
   7692 		rnd_add_uint32(&sc->rnd_source, count);
   7693 
   7694 	/*
   7695 	 * If there are no more pending transmissions, cancel the watchdog
   7696 	 * timer.
   7697 	 */
   7698 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7699 		ifp->if_timer = 0;
   7700 
   7701 	return processed;
   7702 }
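
         /*
          * A minimal sketch (illustrative only, mirroring the loop above) of
          * the completion test: a transmit job can be reclaimed once the chip
          * has set the Descriptor Done bit in its last descriptor's status.
          */
         #if 0
         static inline bool
         example_tx_job_done(struct wm_txqueue *txq, struct wm_txsoft *txs)
         {
         	uint8_t st =
         	    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
         	return (st & WTX_ST_DD) != 0;
         }
         #endif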
   7703 
   7704 static inline uint32_t
   7705 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7706 {
   7707 	struct wm_softc *sc = rxq->rxq_sc;
   7708 
   7709 	if (sc->sc_type == WM_T_82574)
   7710 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7711 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7712 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7713 	else
   7714 		return rxq->rxq_descs[idx].wrx_status;
   7715 }
   7716 
   7717 static inline uint32_t
   7718 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7719 {
   7720 	struct wm_softc *sc = rxq->rxq_sc;
   7721 
   7722 	if (sc->sc_type == WM_T_82574)
   7723 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7724 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7725 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7726 	else
   7727 		return rxq->rxq_descs[idx].wrx_errors;
   7728 }
   7729 
   7730 static inline uint16_t
   7731 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7732 {
   7733 	struct wm_softc *sc = rxq->rxq_sc;
   7734 
   7735 	if (sc->sc_type == WM_T_82574)
   7736 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7737 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7738 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7739 	else
   7740 		return rxq->rxq_descs[idx].wrx_special;
   7741 }
   7742 
   7743 static inline int
   7744 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7745 {
   7746 	struct wm_softc *sc = rxq->rxq_sc;
   7747 
   7748 	if (sc->sc_type == WM_T_82574)
   7749 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7750 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7751 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7752 	else
   7753 		return rxq->rxq_descs[idx].wrx_len;
   7754 }
   7755 
   7756 #ifdef WM_DEBUG
   7757 static inline uint32_t
   7758 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7759 {
   7760 	struct wm_softc *sc = rxq->rxq_sc;
   7761 
   7762 	if (sc->sc_type == WM_T_82574)
   7763 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7764 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7765 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7766 	else
   7767 		return 0;
   7768 }
   7769 
   7770 static inline uint8_t
   7771 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7772 {
   7773 	struct wm_softc *sc = rxq->rxq_sc;
   7774 
   7775 	if (sc->sc_type == WM_T_82574)
   7776 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7777 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7778 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7779 	else
   7780 		return 0;
   7781 }
   7782 #endif /* WM_DEBUG */
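
         /*
          * Note: the accessors above hide three receive descriptor layouts --
          * legacy descriptors, the 82574's extended descriptors and the
          * 82575+ advanced ("NEWQUEUE") descriptors.  The predicates below do
          * the same for individual status and error bits, so wm_rxeof() can
          * stay layout-agnostic.
          */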
   7783 
   7784 static inline bool
   7785 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7786     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7787 {
   7788 
   7789 	if (sc->sc_type == WM_T_82574)
   7790 		return (status & ext_bit) != 0;
   7791 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7792 		return (status & nq_bit) != 0;
   7793 	else
   7794 		return (status & legacy_bit) != 0;
   7795 }
   7796 
   7797 static inline bool
   7798 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7799     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7800 {
   7801 
   7802 	if (sc->sc_type == WM_T_82574)
   7803 		return (error & ext_bit) != 0;
   7804 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7805 		return (error & nq_bit) != 0;
   7806 	else
   7807 		return (error & legacy_bit) != 0;
   7808 }
   7809 
   7810 static inline bool
   7811 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7812 {
   7813 
   7814 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7815 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7816 		return true;
   7817 	else
   7818 		return false;
   7819 }
   7820 
   7821 static inline bool
   7822 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7823 {
   7824 	struct wm_softc *sc = rxq->rxq_sc;
   7825 
   7826 	/* XXXX missing error bit for newqueue? */
    7827 	if (wm_rxdesc_is_set_error(sc, errors,
    7828 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
    7829 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
         		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
    7830 		NQRXC_ERROR_RXE)) {
   7831 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7832 			log(LOG_WARNING, "%s: symbol error\n",
   7833 			    device_xname(sc->sc_dev));
   7834 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7835 			log(LOG_WARNING, "%s: receive sequence error\n",
   7836 			    device_xname(sc->sc_dev));
   7837 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7838 			log(LOG_WARNING, "%s: CRC error\n",
   7839 			    device_xname(sc->sc_dev));
   7840 		return true;
   7841 	}
   7842 
   7843 	return false;
   7844 }
   7845 
   7846 static inline bool
   7847 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7848 {
   7849 	struct wm_softc *sc = rxq->rxq_sc;
   7850 
   7851 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7852 		NQRXC_STATUS_DD)) {
   7853 		/* We have processed all of the receive descriptors. */
   7854 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7855 		return false;
   7856 	}
   7857 
   7858 	return true;
   7859 }
   7860 
   7861 static inline bool
   7862 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7863     struct mbuf *m)
   7864 {
   7865 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7866 
   7867 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7868 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7869 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7870 	}
   7871 
   7872 	return true;
   7873 }
   7874 
   7875 static inline void
   7876 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7877     uint32_t errors, struct mbuf *m)
   7878 {
   7879 	struct wm_softc *sc = rxq->rxq_sc;
   7880 
   7881 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7882 		if (wm_rxdesc_is_set_status(sc, status,
   7883 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7884 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7885 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7886 			if (wm_rxdesc_is_set_error(sc, errors,
   7887 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7888 				m->m_pkthdr.csum_flags |=
   7889 					M_CSUM_IPv4_BAD;
   7890 		}
   7891 		if (wm_rxdesc_is_set_status(sc, status,
   7892 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7893 			/*
   7894 			 * Note: we don't know if this was TCP or UDP,
   7895 			 * so we just set both bits, and expect the
   7896 			 * upper layers to deal.
   7897 			 */
   7898 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7899 			m->m_pkthdr.csum_flags |=
   7900 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7901 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7902 			if (wm_rxdesc_is_set_error(sc, errors,
   7903 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7904 				m->m_pkthdr.csum_flags |=
   7905 					M_CSUM_TCP_UDP_BAD;
   7906 		}
   7907 	}
   7908 }
   7909 
   7910 /*
   7911  * wm_rxeof:
   7912  *
   7913  *	Helper; handle receive interrupts.
   7914  */
   7915 static void
   7916 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   7917 {
   7918 	struct wm_softc *sc = rxq->rxq_sc;
   7919 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7920 	struct wm_rxsoft *rxs;
   7921 	struct mbuf *m;
   7922 	int i, len;
   7923 	int count = 0;
   7924 	uint32_t status, errors;
   7925 	uint16_t vlantag;
   7926 
   7927 	KASSERT(mutex_owned(rxq->rxq_lock));
   7928 
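	/*
	 * Walk the descriptor ring from the current pointer, stopping at
	 * the first descriptor the hardware still owns or when the
	 * caller's limit is exhausted.
	 */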
   7929 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7930 		if (limit-- == 0) {
   7931 			rxq->rxq_ptr = i;
   7932 			break;
   7933 		}
   7934 
   7935 		rxs = &rxq->rxq_soft[i];
   7936 
   7937 		DPRINTF(WM_DEBUG_RX,
   7938 		    ("%s: RX: checking descriptor %d\n",
   7939 		    device_xname(sc->sc_dev), i));
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7941 
   7942 		status = wm_rxdesc_get_status(rxq, i);
   7943 		errors = wm_rxdesc_get_errors(rxq, i);
   7944 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7945 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7946 #ifdef WM_DEBUG
   7947 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   7948 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7949 #endif
   7950 
   7951 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while holding rxq_lock
			 * so that it stays consistent with the counters.
			 */
   7956 			rxq->rxq_ptr = i;
   7957 			break;
   7958 		}
   7959 
   7960 		count++;
   7961 		if (__predict_false(rxq->rxq_discard)) {
   7962 			DPRINTF(WM_DEBUG_RX,
   7963 			    ("%s: RX: discarding contents of descriptor %d\n",
   7964 			    device_xname(sc->sc_dev), i));
   7965 			wm_init_rxdesc(rxq, i);
   7966 			if (wm_rxdesc_is_eop(rxq, status)) {
   7967 				/* Reset our state. */
   7968 				DPRINTF(WM_DEBUG_RX,
   7969 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7970 				    device_xname(sc->sc_dev)));
   7971 				rxq->rxq_discard = 0;
   7972 			}
   7973 			continue;
   7974 		}
   7975 
   7976 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7977 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7978 
   7979 		m = rxs->rxs_mbuf;
   7980 
   7981 		/*
   7982 		 * Add a new receive buffer to the ring, unless of
   7983 		 * course the length is zero. Treat the latter as a
   7984 		 * failed mapping.
   7985 		 */
   7986 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7987 			/*
   7988 			 * Failed, throw away what we've done so
   7989 			 * far, and discard the rest of the packet.
   7990 			 */
   7991 			ifp->if_ierrors++;
   7992 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7993 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7994 			wm_init_rxdesc(rxq, i);
   7995 			if (!wm_rxdesc_is_eop(rxq, status))
   7996 				rxq->rxq_discard = 1;
   7997 			if (rxq->rxq_head != NULL)
   7998 				m_freem(rxq->rxq_head);
   7999 			WM_RXCHAIN_RESET(rxq);
   8000 			DPRINTF(WM_DEBUG_RX,
   8001 			    ("%s: RX: Rx buffer allocation failed, "
   8002 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8003 			    rxq->rxq_discard ? " (discard)" : ""));
   8004 			continue;
   8005 		}
   8006 
   8007 		m->m_len = len;
   8008 		rxq->rxq_len += len;
   8009 		DPRINTF(WM_DEBUG_RX,
   8010 		    ("%s: RX: buffer at %p len %d\n",
   8011 		    device_xname(sc->sc_dev), m->m_data, len));
   8012 
   8013 		/* If this is not the end of the packet, keep looking. */
   8014 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8015 			WM_RXCHAIN_LINK(rxq, m);
   8016 			DPRINTF(WM_DEBUG_RX,
   8017 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8018 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8019 			continue;
   8020 		}
   8021 
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on the I350, I354,
		 * I210 and I211 (not all chips can be configured to strip
		 * it), so we need to trim it.
		 * We may need to adjust the length of the previous mbuf in
		 * the chain if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so we don't trim the
		 * FCS there.
		 */
   8032 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8033 		    && (sc->sc_type != WM_T_I210)
   8034 		    && (sc->sc_type != WM_T_I211)) {
   8035 			if (m->m_len < ETHER_CRC_LEN) {
   8036 				rxq->rxq_tail->m_len
   8037 				    -= (ETHER_CRC_LEN - m->m_len);
   8038 				m->m_len = 0;
   8039 			} else
   8040 				m->m_len -= ETHER_CRC_LEN;
   8041 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8042 		} else
   8043 			len = rxq->rxq_len;
   8044 
   8045 		WM_RXCHAIN_LINK(rxq, m);
   8046 
   8047 		*rxq->rxq_tailp = NULL;
   8048 		m = rxq->rxq_head;
   8049 
   8050 		WM_RXCHAIN_RESET(rxq);
   8051 
   8052 		DPRINTF(WM_DEBUG_RX,
   8053 		    ("%s: RX: have entire packet, len -> %d\n",
   8054 		    device_xname(sc->sc_dev), len));
   8055 
   8056 		/* If an error occurred, update stats and drop the packet. */
   8057 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8058 			m_freem(m);
   8059 			continue;
   8060 		}
   8061 
   8062 		/* No errors.  Receive the packet. */
   8063 		m_set_rcvif(m, ifp);
   8064 		m->m_pkthdr.len = len;
		/*
		 * TODO
		 * Should save the RSS hash and RSS type in this mbuf.
		 */
   8069 		DPRINTF(WM_DEBUG_RX,
   8070 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8071 			device_xname(sc->sc_dev), rsstype, rsshash));
   8072 
   8073 		/*
   8074 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8075 		 * for us.  Associate the tag with the packet.
   8076 		 */
   8077 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8078 			continue;
   8079 
   8080 		/* Set up checksum info for this packet. */
   8081 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer while holding rxq_lock so it
		 * stays consistent with the updated packet and byte counters.
		 */
   8086 		rxq->rxq_ptr = i;
   8087 		rxq->rxq_packets++;
   8088 		rxq->rxq_bytes += len;
   8089 		mutex_exit(rxq->rxq_lock);
   8090 
   8091 		/* Pass it on. */
   8092 		if_percpuq_enqueue(sc->sc_ipq, m);
   8093 
   8094 		mutex_enter(rxq->rxq_lock);
   8095 
   8096 		if (rxq->rxq_stopping)
   8097 			break;
   8098 	}
   8099 
   8100 	if (count != 0)
   8101 		rnd_add_uint32(&sc->rnd_source, count);
   8102 
   8103 	DPRINTF(WM_DEBUG_RX,
   8104 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8105 }
   8106 
   8107 /*
   8108  * wm_linkintr_gmii:
   8109  *
   8110  *	Helper; handle link interrupts for GMII.
   8111  */
   8112 static void
   8113 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8114 {
   8115 
   8116 	KASSERT(WM_CORE_LOCKED(sc));
   8117 
   8118 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8119 		__func__));
   8120 
   8121 	if (icr & ICR_LSC) {
   8122 		uint32_t reg;
   8123 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8124 
   8125 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8126 			wm_gig_downshift_workaround_ich8lan(sc);
   8127 
   8128 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8129 			device_xname(sc->sc_dev)));
   8130 		mii_pollstat(&sc->sc_mii);
   8131 		if (sc->sc_type == WM_T_82543) {
   8132 			int miistatus, active;
   8133 
   8134 			/*
   8135 			 * With 82543, we need to force speed and
   8136 			 * duplex on the MAC equal to what the PHY
   8137 			 * speed and duplex configuration is.
   8138 			 */
   8139 			miistatus = sc->sc_mii.mii_media_status;
   8140 
   8141 			if (miistatus & IFM_ACTIVE) {
   8142 				active = sc->sc_mii.mii_media_active;
   8143 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8144 				switch (IFM_SUBTYPE(active)) {
   8145 				case IFM_10_T:
   8146 					sc->sc_ctrl |= CTRL_SPEED_10;
   8147 					break;
   8148 				case IFM_100_TX:
   8149 					sc->sc_ctrl |= CTRL_SPEED_100;
   8150 					break;
   8151 				case IFM_1000_T:
   8152 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8153 					break;
   8154 				default:
					/*
					 * Fiber?
					 * Should not enter here.
					 */
					printf("%s: unknown media (%x)\n",
					    device_xname(sc->sc_dev), active);
   8160 					break;
   8161 				}
   8162 				if (active & IFM_FDX)
   8163 					sc->sc_ctrl |= CTRL_FD;
   8164 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8165 			}
   8166 		} else if ((sc->sc_type == WM_T_ICH8)
   8167 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8168 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8169 		} else if (sc->sc_type == WM_T_PCH) {
   8170 			wm_k1_gig_workaround_hv(sc,
   8171 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8172 		}
   8173 
   8174 		if ((sc->sc_phytype == WMPHY_82578)
   8175 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8176 			== IFM_1000_T)) {
   8177 
   8178 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8179 				delay(200*1000); /* XXX too big */
   8180 
   8181 				/* Link stall fix for link up */
   8182 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8183 				    HV_MUX_DATA_CTRL,
   8184 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8185 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8186 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8187 				    HV_MUX_DATA_CTRL,
   8188 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8189 			}
   8190 		}
		/*
		 * I217 packet loss issue:
		 * Ensure that the FEXTNVM4 Beacon Duration is set
		 * correctly on power up.  Set the Beacon Duration for
		 * I217 to 8 usec.
		 */
   8197 		if ((sc->sc_type == WM_T_PCH_LPT)
   8198 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8199 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8200 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8201 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8202 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8203 		}
   8204 
   8205 		/* XXX Work-around I218 hang issue */
   8206 		/* e1000_k1_workaround_lpt_lp() */
   8207 
   8208 		if ((sc->sc_type == WM_T_PCH_LPT)
   8209 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8210 			/*
   8211 			 * Set platform power management values for Latency
   8212 			 * Tolerance Reporting (LTR)
   8213 			 */
   8214 			wm_platform_pm_pch_lpt(sc,
   8215 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8216 				    != 0));
   8217 		}
   8218 
   8219 		/* FEXTNVM6 K1-off workaround */
   8220 		if (sc->sc_type == WM_T_PCH_SPT) {
   8221 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8222 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8223 			    & FEXTNVM6_K1_OFF_ENABLE)
   8224 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8225 			else
   8226 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8227 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8228 		}
   8229 	} else if (icr & ICR_RXSEQ) {
   8230 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8231 			device_xname(sc->sc_dev)));
   8232 	}
   8233 }
   8234 
   8235 /*
   8236  * wm_linkintr_tbi:
   8237  *
   8238  *	Helper; handle link interrupts for TBI mode.
   8239  */
   8240 static void
   8241 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8242 {
   8243 	uint32_t status;
   8244 
   8245 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8246 		__func__));
   8247 
   8248 	status = CSR_READ(sc, WMREG_STATUS);
   8249 	if (icr & ICR_LSC) {
   8250 		if (status & STATUS_LU) {
   8251 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8252 			    device_xname(sc->sc_dev),
   8253 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8254 			/*
   8255 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8256 			 * so we should update sc->sc_ctrl
   8257 			 */
   8258 
   8259 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8260 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8261 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8262 			if (status & STATUS_FD)
   8263 				sc->sc_tctl |=
   8264 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8265 			else
   8266 				sc->sc_tctl |=
   8267 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8268 			if (sc->sc_ctrl & CTRL_TFCE)
   8269 				sc->sc_fcrtl |= FCRTL_XONE;
   8270 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8271 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8272 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8273 				      sc->sc_fcrtl);
   8274 			sc->sc_tbi_linkup = 1;
   8275 		} else {
   8276 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8277 			    device_xname(sc->sc_dev)));
   8278 			sc->sc_tbi_linkup = 0;
   8279 		}
   8280 		/* Update LED */
   8281 		wm_tbi_serdes_set_linkled(sc);
   8282 	} else if (icr & ICR_RXSEQ) {
   8283 		DPRINTF(WM_DEBUG_LINK,
   8284 		    ("%s: LINK: Receive sequence error\n",
   8285 		    device_xname(sc->sc_dev)));
   8286 	}
   8287 }
   8288 
   8289 /*
   8290  * wm_linkintr_serdes:
   8291  *
 *	Helper; handle link interrupts for SERDES mode.
   8293  */
   8294 static void
   8295 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8296 {
   8297 	struct mii_data *mii = &sc->sc_mii;
   8298 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8299 	uint32_t pcs_adv, pcs_lpab, reg;
   8300 
   8301 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8302 		__func__));
   8303 
   8304 	if (icr & ICR_LSC) {
   8305 		/* Check PCS */
   8306 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8307 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8308 			mii->mii_media_status |= IFM_ACTIVE;
   8309 			sc->sc_tbi_linkup = 1;
   8310 		} else {
   8311 			mii->mii_media_status |= IFM_NONE;
   8312 			sc->sc_tbi_linkup = 0;
   8313 			wm_tbi_serdes_set_linkled(sc);
   8314 			return;
   8315 		}
   8316 		mii->mii_media_active |= IFM_1000_SX;
   8317 		if ((reg & PCS_LSTS_FDX) != 0)
   8318 			mii->mii_media_active |= IFM_FDX;
   8319 		else
   8320 			mii->mii_media_active |= IFM_HDX;
   8321 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8322 			/* Check flow */
   8323 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8324 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8325 				DPRINTF(WM_DEBUG_LINK,
   8326 				    ("XXX LINKOK but not ACOMP\n"));
   8327 				return;
   8328 			}
   8329 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8330 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8331 			DPRINTF(WM_DEBUG_LINK,
   8332 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8333 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8334 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8335 				mii->mii_media_active |= IFM_FLOW
   8336 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8337 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8338 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8339 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8340 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8341 				mii->mii_media_active |= IFM_FLOW
   8342 				    | IFM_ETH_TXPAUSE;
   8343 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8344 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8345 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8346 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8347 				mii->mii_media_active |= IFM_FLOW
   8348 				    | IFM_ETH_RXPAUSE;
   8349 		}
   8350 		/* Update LED */
   8351 		wm_tbi_serdes_set_linkled(sc);
   8352 	} else {
   8353 		DPRINTF(WM_DEBUG_LINK,
   8354 		    ("%s: LINK: Receive sequence error\n",
   8355 		    device_xname(sc->sc_dev)));
   8356 	}
   8357 }
   8358 
   8359 /*
   8360  * wm_linkintr:
   8361  *
   8362  *	Helper; handle link interrupts.
   8363  */
   8364 static void
   8365 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8366 {
   8367 
   8368 	KASSERT(WM_CORE_LOCKED(sc));
   8369 
   8370 	if (sc->sc_flags & WM_F_HAS_MII)
   8371 		wm_linkintr_gmii(sc, icr);
   8372 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8373 	    && (sc->sc_type >= WM_T_82575))
   8374 		wm_linkintr_serdes(sc, icr);
   8375 	else
   8376 		wm_linkintr_tbi(sc, icr);
   8377 }
   8378 
   8379 /*
   8380  * wm_intr_legacy:
   8381  *
   8382  *	Interrupt service routine for INTx and MSI.
   8383  */
   8384 static int
   8385 wm_intr_legacy(void *arg)
   8386 {
   8387 	struct wm_softc *sc = arg;
   8388 	struct wm_queue *wmq = &sc->sc_queue[0];
   8389 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8390 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8391 	uint32_t icr, rndval = 0;
   8392 	int handled = 0;
   8393 
   8394 	DPRINTF(WM_DEBUG_TX,
   8395 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8396 	while (1 /* CONSTCOND */) {
   8397 		icr = CSR_READ(sc, WMREG_ICR);
   8398 		if ((icr & sc->sc_icr) == 0)
   8399 			break;
   8400 		if (rndval == 0)
   8401 			rndval = icr;
   8402 
   8403 		mutex_enter(rxq->rxq_lock);
   8404 
   8405 		if (rxq->rxq_stopping) {
   8406 			mutex_exit(rxq->rxq_lock);
   8407 			break;
   8408 		}
   8409 
   8410 		handled = 1;
   8411 
   8412 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8413 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8414 			DPRINTF(WM_DEBUG_RX,
   8415 			    ("%s: RX: got Rx intr 0x%08x\n",
   8416 			    device_xname(sc->sc_dev),
   8417 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8418 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8419 		}
   8420 #endif
   8421 		wm_rxeof(rxq, UINT_MAX);
   8422 
   8423 		mutex_exit(rxq->rxq_lock);
   8424 		mutex_enter(txq->txq_lock);
   8425 
   8426 		if (txq->txq_stopping) {
   8427 			mutex_exit(txq->txq_lock);
   8428 			break;
   8429 		}
   8430 
   8431 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8432 		if (icr & ICR_TXDW) {
   8433 			DPRINTF(WM_DEBUG_TX,
   8434 			    ("%s: TX: got TXDW interrupt\n",
   8435 			    device_xname(sc->sc_dev)));
   8436 			WM_Q_EVCNT_INCR(txq, txdw);
   8437 		}
   8438 #endif
   8439 		wm_txeof(sc, txq);
   8440 
   8441 		mutex_exit(txq->txq_lock);
   8442 		WM_CORE_LOCK(sc);
   8443 
   8444 		if (sc->sc_core_stopping) {
   8445 			WM_CORE_UNLOCK(sc);
   8446 			break;
   8447 		}
   8448 
   8449 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8450 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8451 			wm_linkintr(sc, icr);
   8452 		}
   8453 
   8454 		WM_CORE_UNLOCK(sc);
   8455 
   8456 		if (icr & ICR_RXO) {
   8457 #if defined(WM_DEBUG)
   8458 			log(LOG_WARNING, "%s: Receive overrun\n",
   8459 			    device_xname(sc->sc_dev));
   8460 #endif /* defined(WM_DEBUG) */
   8461 		}
   8462 	}
   8463 
   8464 	rnd_add_uint32(&sc->rnd_source, rndval);
   8465 
   8466 	if (handled) {
   8467 		/* Try to get more packets going. */
   8468 		softint_schedule(wmq->wmq_si);
   8469 	}
   8470 
   8471 	return handled;
   8472 }
   8473 
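/*
 * wm_txrxintr_disable:
 *
 *	Mask the Tx/Rx interrupts of one queue pair.  The 82574 uses
 *	per-queue bits in IMC, the 82575 uses queue bits in EIMC, and
 *	later chips use one EIMC bit per MSI-X vector.
 */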
   8474 static inline void
   8475 wm_txrxintr_disable(struct wm_queue *wmq)
   8476 {
   8477 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8478 
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8485 }
   8486 
   8487 static inline void
   8488 wm_txrxintr_enable(struct wm_queue *wmq)
   8489 {
   8490 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8491 
   8492 	wm_itrs_calculate(sc, wmq);
   8493 
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8500 }
   8501 
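/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx queue pair for MSI-X.
 */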
   8502 static int
   8503 wm_txrxintr_msix(void *arg)
   8504 {
   8505 	struct wm_queue *wmq = arg;
   8506 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8507 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8508 	struct wm_softc *sc = txq->txq_sc;
   8509 	u_int limit = sc->sc_rx_intr_process_limit;
   8510 
   8511 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8512 
   8513 	DPRINTF(WM_DEBUG_TX,
   8514 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8515 
   8516 	wm_txrxintr_disable(wmq);
   8517 
   8518 	mutex_enter(txq->txq_lock);
   8519 
   8520 	if (txq->txq_stopping) {
   8521 		mutex_exit(txq->txq_lock);
   8522 		return 0;
   8523 	}
   8524 
   8525 	WM_Q_EVCNT_INCR(txq, txdw);
   8526 	wm_txeof(sc, txq);
   8527 	/* wm_deferred start() is done in wm_handle_queue(). */
   8528 	mutex_exit(txq->txq_lock);
   8529 
   8530 	DPRINTF(WM_DEBUG_RX,
   8531 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8532 	mutex_enter(rxq->rxq_lock);
   8533 
   8534 	if (rxq->rxq_stopping) {
   8535 		mutex_exit(rxq->rxq_lock);
   8536 		return 0;
   8537 	}
   8538 
   8539 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8540 	wm_rxeof(rxq, limit);
   8541 	mutex_exit(rxq->rxq_lock);
   8542 
   8543 	wm_itrs_writereg(sc, wmq);
   8544 
   8545 	softint_schedule(wmq->wmq_si);
   8546 
   8547 	return 1;
   8548 }
   8549 
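/*
 * wm_handle_queue:
 *
 *	Softint handler for one queue: process Tx completions, deferred
 *	start and the Rx ring, then re-enable the queue interrupt.
 */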
   8550 static void
   8551 wm_handle_queue(void *arg)
   8552 {
   8553 	struct wm_queue *wmq = arg;
   8554 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8555 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8556 	struct wm_softc *sc = txq->txq_sc;
   8557 	u_int limit = sc->sc_rx_process_limit;
   8558 
   8559 	mutex_enter(txq->txq_lock);
   8560 	if (txq->txq_stopping) {
   8561 		mutex_exit(txq->txq_lock);
   8562 		return;
   8563 	}
   8564 	wm_txeof(sc, txq);
   8565 	wm_deferred_start_locked(txq);
   8566 	mutex_exit(txq->txq_lock);
   8567 
   8568 	mutex_enter(rxq->rxq_lock);
   8569 	if (rxq->rxq_stopping) {
   8570 		mutex_exit(rxq->rxq_lock);
   8571 		return;
   8572 	}
   8573 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8574 	wm_rxeof(rxq, limit);
   8575 	mutex_exit(rxq->rxq_lock);
   8576 
   8577 	wm_txrxintr_enable(wmq);
   8578 }
   8579 
   8580 /*
   8581  * wm_linkintr_msix:
   8582  *
   8583  *	Interrupt service routine for link status change for MSI-X.
   8584  */
   8585 static int
   8586 wm_linkintr_msix(void *arg)
   8587 {
   8588 	struct wm_softc *sc = arg;
   8589 	uint32_t reg;
   8590 
   8591 	DPRINTF(WM_DEBUG_LINK,
   8592 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8593 
   8594 	reg = CSR_READ(sc, WMREG_ICR);
   8595 	WM_CORE_LOCK(sc);
   8596 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8597 		goto out;
   8598 
   8599 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8600 	wm_linkintr(sc, ICR_LSC);
   8601 
   8602 out:
   8603 	WM_CORE_UNLOCK(sc);
   8604 
   8605 	if (sc->sc_type == WM_T_82574)
   8606 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8607 	else if (sc->sc_type == WM_T_82575)
   8608 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8609 	else
   8610 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8611 
   8612 	return 1;
   8613 }
   8614 
   8615 /*
   8616  * Media related.
   8617  * GMII, SGMII, TBI (and SERDES)
   8618  */
   8619 
   8620 /* Common */
   8621 
   8622 /*
   8623  * wm_tbi_serdes_set_linkled:
   8624  *
   8625  *	Update the link LED on TBI and SERDES devices.
   8626  */
   8627 static void
   8628 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8629 {
   8630 
   8631 	if (sc->sc_tbi_linkup)
   8632 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8633 	else
   8634 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8635 
   8636 	/* 82540 or newer devices are active low */
   8637 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8638 
   8639 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8640 }
   8641 
   8642 /* GMII related */
   8643 
   8644 /*
   8645  * wm_gmii_reset:
   8646  *
   8647  *	Reset the PHY.
   8648  */
   8649 static void
   8650 wm_gmii_reset(struct wm_softc *sc)
   8651 {
   8652 	uint32_t reg;
   8653 	int rv;
   8654 
   8655 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8656 		device_xname(sc->sc_dev), __func__));
   8657 
   8658 	rv = sc->phy.acquire(sc);
   8659 	if (rv != 0) {
   8660 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8661 		    __func__);
   8662 		return;
   8663 	}
   8664 
   8665 	switch (sc->sc_type) {
   8666 	case WM_T_82542_2_0:
   8667 	case WM_T_82542_2_1:
   8668 		/* null */
   8669 		break;
   8670 	case WM_T_82543:
   8671 		/*
   8672 		 * With 82543, we need to force speed and duplex on the MAC
   8673 		 * equal to what the PHY speed and duplex configuration is.
   8674 		 * In addition, we need to perform a hardware reset on the PHY
   8675 		 * to take it out of reset.
   8676 		 */
   8677 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8678 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8679 
   8680 		/* The PHY reset pin is active-low. */
   8681 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8682 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8683 		    CTRL_EXT_SWDPIN(4));
   8684 		reg |= CTRL_EXT_SWDPIO(4);
   8685 
   8686 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8687 		CSR_WRITE_FLUSH(sc);
   8688 		delay(10*1000);
   8689 
   8690 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8691 		CSR_WRITE_FLUSH(sc);
   8692 		delay(150);
   8693 #if 0
   8694 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8695 #endif
   8696 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8697 		break;
   8698 	case WM_T_82544:	/* reset 10000us */
   8699 	case WM_T_82540:
   8700 	case WM_T_82545:
   8701 	case WM_T_82545_3:
   8702 	case WM_T_82546:
   8703 	case WM_T_82546_3:
   8704 	case WM_T_82541:
   8705 	case WM_T_82541_2:
   8706 	case WM_T_82547:
   8707 	case WM_T_82547_2:
   8708 	case WM_T_82571:	/* reset 100us */
   8709 	case WM_T_82572:
   8710 	case WM_T_82573:
   8711 	case WM_T_82574:
   8712 	case WM_T_82575:
   8713 	case WM_T_82576:
   8714 	case WM_T_82580:
   8715 	case WM_T_I350:
   8716 	case WM_T_I354:
   8717 	case WM_T_I210:
   8718 	case WM_T_I211:
   8719 	case WM_T_82583:
   8720 	case WM_T_80003:
   8721 		/* generic reset */
   8722 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8723 		CSR_WRITE_FLUSH(sc);
   8724 		delay(20000);
   8725 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8726 		CSR_WRITE_FLUSH(sc);
   8727 		delay(20000);
   8728 
   8729 		if ((sc->sc_type == WM_T_82541)
   8730 		    || (sc->sc_type == WM_T_82541_2)
   8731 		    || (sc->sc_type == WM_T_82547)
   8732 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   8734 			/* XXX add code to set LED after phy reset */
   8735 		}
   8736 		break;
   8737 	case WM_T_ICH8:
   8738 	case WM_T_ICH9:
   8739 	case WM_T_ICH10:
   8740 	case WM_T_PCH:
   8741 	case WM_T_PCH2:
   8742 	case WM_T_PCH_LPT:
   8743 	case WM_T_PCH_SPT:
   8744 		/* generic reset */
   8745 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8746 		CSR_WRITE_FLUSH(sc);
   8747 		delay(100);
   8748 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8749 		CSR_WRITE_FLUSH(sc);
   8750 		delay(150);
   8751 		break;
   8752 	default:
   8753 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8754 		    __func__);
   8755 		break;
   8756 	}
   8757 
   8758 	sc->phy.release(sc);
   8759 
   8760 	/* get_cfg_done */
   8761 	wm_get_cfg_done(sc);
   8762 
   8763 	/* extra setup */
   8764 	switch (sc->sc_type) {
   8765 	case WM_T_82542_2_0:
   8766 	case WM_T_82542_2_1:
   8767 	case WM_T_82543:
   8768 	case WM_T_82544:
   8769 	case WM_T_82540:
   8770 	case WM_T_82545:
   8771 	case WM_T_82545_3:
   8772 	case WM_T_82546:
   8773 	case WM_T_82546_3:
   8774 	case WM_T_82541_2:
   8775 	case WM_T_82547_2:
   8776 	case WM_T_82571:
   8777 	case WM_T_82572:
   8778 	case WM_T_82573:
   8779 	case WM_T_82575:
   8780 	case WM_T_82576:
   8781 	case WM_T_82580:
   8782 	case WM_T_I350:
   8783 	case WM_T_I354:
   8784 	case WM_T_I210:
   8785 	case WM_T_I211:
   8786 	case WM_T_80003:
   8787 		/* null */
   8788 		break;
   8789 	case WM_T_82574:
   8790 	case WM_T_82583:
   8791 		wm_lplu_d0_disable(sc);
   8792 		break;
   8793 	case WM_T_82541:
   8794 	case WM_T_82547:
   8795 		/* XXX Configure actively LED after PHY reset */
   8796 		break;
   8797 	case WM_T_ICH8:
   8798 	case WM_T_ICH9:
   8799 	case WM_T_ICH10:
   8800 	case WM_T_PCH:
   8801 	case WM_T_PCH2:
   8802 	case WM_T_PCH_LPT:
   8803 	case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
   8805 		delay(10*1000);
   8806 
   8807 		if (sc->sc_type == WM_T_PCH)
   8808 			wm_hv_phy_workaround_ich8lan(sc);
   8809 
   8810 		if (sc->sc_type == WM_T_PCH2)
   8811 			wm_lv_phy_workaround_ich8lan(sc);
   8812 
   8813 		/* Clear the host wakeup bit after lcd reset */
   8814 		if (sc->sc_type >= WM_T_PCH) {
   8815 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8816 			    BM_PORT_GEN_CFG);
   8817 			reg &= ~BM_WUC_HOST_WU_BIT;
   8818 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8819 			    BM_PORT_GEN_CFG, reg);
   8820 		}
   8821 
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */
   8826 
   8827 		/* Disable D0 LPLU. */
   8828 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8829 			wm_lplu_d0_disable_pch(sc);
   8830 		else
   8831 			wm_lplu_d0_disable(sc);	/* ICH* */
   8832 		break;
   8833 	default:
   8834 		panic("%s: unknown type\n", __func__);
   8835 		break;
   8836 	}
   8837 }
   8838 
/*
 * Setup sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and to select them the PCI ID or MAC type is required,
 * without accessing PHY registers.
 *
 *  On the first call of this function, the PHY ID is not yet known, so
 * the PCI ID or MAC type is checked.  The list of PCI IDs may not be
 * perfect, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type.  The result might still not be perfect because of missing
 * entries in the comparison, but it is better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   8857 static void
   8858 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8859     uint16_t phy_model)
   8860 {
   8861 	device_t dev = sc->sc_dev;
   8862 	struct mii_data *mii = &sc->sc_mii;
   8863 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8864 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8865 	mii_readreg_t new_readreg;
   8866 	mii_writereg_t new_writereg;
   8867 
   8868 	if (mii->mii_readreg == NULL) {
   8869 		/*
   8870 		 *  This is the first call of this function. For ICH and PCH
   8871 		 * variants, it's difficult to determine the PHY access method
   8872 		 * by sc_type, so use the PCI product ID for some devices.
   8873 		 */
   8874 
   8875 		switch (sc->sc_pcidevid) {
   8876 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8877 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8878 			/* 82577 */
   8879 			new_phytype = WMPHY_82577;
   8880 			break;
   8881 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8882 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8883 			/* 82578 */
   8884 			new_phytype = WMPHY_82578;
   8885 			break;
   8886 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8887 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8888 			/* 82579 */
   8889 			new_phytype = WMPHY_82579;
   8890 			break;
   8891 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8892 		case PCI_PRODUCT_INTEL_82801I_BM:
   8893 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8894 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8895 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8896 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8897 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8898 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8899 			/* ICH8, 9, 10 with 82567 */
   8900 			new_phytype = WMPHY_BM;
   8901 			break;
   8902 		default:
   8903 			break;
   8904 		}
   8905 	} else {
   8906 		/* It's not the first call. Use PHY OUI and model */
   8907 		switch (phy_oui) {
   8908 		case MII_OUI_ATHEROS: /* XXX ??? */
   8909 			switch (phy_model) {
   8910 			case 0x0004: /* XXX */
   8911 				new_phytype = WMPHY_82578;
   8912 				break;
   8913 			default:
   8914 				break;
   8915 			}
   8916 			break;
   8917 		case MII_OUI_xxMARVELL:
   8918 			switch (phy_model) {
   8919 			case MII_MODEL_xxMARVELL_I210:
   8920 				new_phytype = WMPHY_I210;
   8921 				break;
   8922 			case MII_MODEL_xxMARVELL_E1011:
   8923 			case MII_MODEL_xxMARVELL_E1000_3:
   8924 			case MII_MODEL_xxMARVELL_E1000_5:
   8925 			case MII_MODEL_xxMARVELL_E1112:
   8926 				new_phytype = WMPHY_M88;
   8927 				break;
   8928 			case MII_MODEL_xxMARVELL_E1149:
   8929 				new_phytype = WMPHY_BM;
   8930 				break;
   8931 			case MII_MODEL_xxMARVELL_E1111:
   8932 			case MII_MODEL_xxMARVELL_I347:
   8933 			case MII_MODEL_xxMARVELL_E1512:
   8934 			case MII_MODEL_xxMARVELL_E1340M:
   8935 			case MII_MODEL_xxMARVELL_E1543:
   8936 				new_phytype = WMPHY_M88;
   8937 				break;
   8938 			case MII_MODEL_xxMARVELL_I82563:
   8939 				new_phytype = WMPHY_GG82563;
   8940 				break;
   8941 			default:
   8942 				break;
   8943 			}
   8944 			break;
   8945 		case MII_OUI_INTEL:
   8946 			switch (phy_model) {
   8947 			case MII_MODEL_INTEL_I82577:
   8948 				new_phytype = WMPHY_82577;
   8949 				break;
   8950 			case MII_MODEL_INTEL_I82579:
   8951 				new_phytype = WMPHY_82579;
   8952 				break;
   8953 			case MII_MODEL_INTEL_I217:
   8954 				new_phytype = WMPHY_I217;
   8955 				break;
   8956 			case MII_MODEL_INTEL_I82580:
   8957 			case MII_MODEL_INTEL_I350:
   8958 				new_phytype = WMPHY_82580;
   8959 				break;
   8960 			default:
   8961 				break;
   8962 			}
   8963 			break;
   8964 		case MII_OUI_yyINTEL:
   8965 			switch (phy_model) {
   8966 			case MII_MODEL_yyINTEL_I82562G:
   8967 			case MII_MODEL_yyINTEL_I82562EM:
   8968 			case MII_MODEL_yyINTEL_I82562ET:
   8969 				new_phytype = WMPHY_IFE;
   8970 				break;
   8971 			case MII_MODEL_yyINTEL_IGP01E1000:
   8972 				new_phytype = WMPHY_IGP;
   8973 				break;
   8974 			case MII_MODEL_yyINTEL_I82566:
   8975 				new_phytype = WMPHY_IGP_3;
   8976 				break;
   8977 			default:
   8978 				break;
   8979 			}
   8980 			break;
   8981 		default:
   8982 			break;
   8983 		}
   8984 		if (new_phytype == WMPHY_UNKNOWN)
   8985 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   8986 			    __func__);
   8987 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev,
			    "Previously assumed PHY type (%u) was incorrect. "
			    "PHY type from PHY ID = %u\n",
			    sc->sc_phytype, new_phytype);
		}
   8994 	}
   8995 
   8996 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   8997 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   8998 		/* SGMII */
   8999 		new_readreg = wm_sgmii_readreg;
   9000 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9002 		/* BM2 (phyaddr == 1) */
   9003 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9004 		    && (new_phytype != WMPHY_BM)
   9005 		    && (new_phytype != WMPHY_UNKNOWN))
   9006 			doubt_phytype = new_phytype;
   9007 		new_phytype = WMPHY_BM;
   9008 		new_readreg = wm_gmii_bm_readreg;
   9009 		new_writereg = wm_gmii_bm_writereg;
   9010 	} else if (sc->sc_type >= WM_T_PCH) {
   9011 		/* All PCH* use _hv_ */
   9012 		new_readreg = wm_gmii_hv_readreg;
   9013 		new_writereg = wm_gmii_hv_writereg;
   9014 	} else if (sc->sc_type >= WM_T_ICH8) {
   9015 		/* non-82567 ICH8, 9 and 10 */
   9016 		new_readreg = wm_gmii_i82544_readreg;
   9017 		new_writereg = wm_gmii_i82544_writereg;
   9018 	} else if (sc->sc_type >= WM_T_80003) {
   9019 		/* 80003 */
   9020 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9021 		    && (new_phytype != WMPHY_GG82563)
   9022 		    && (new_phytype != WMPHY_UNKNOWN))
   9023 			doubt_phytype = new_phytype;
   9024 		new_phytype = WMPHY_GG82563;
   9025 		new_readreg = wm_gmii_i80003_readreg;
   9026 		new_writereg = wm_gmii_i80003_writereg;
   9027 	} else if (sc->sc_type >= WM_T_I210) {
   9028 		/* I210 and I211 */
   9029 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9030 		    && (new_phytype != WMPHY_I210)
   9031 		    && (new_phytype != WMPHY_UNKNOWN))
   9032 			doubt_phytype = new_phytype;
   9033 		new_phytype = WMPHY_I210;
   9034 		new_readreg = wm_gmii_gs40g_readreg;
   9035 		new_writereg = wm_gmii_gs40g_writereg;
   9036 	} else if (sc->sc_type >= WM_T_82580) {
   9037 		/* 82580, I350 and I354 */
   9038 		new_readreg = wm_gmii_82580_readreg;
   9039 		new_writereg = wm_gmii_82580_writereg;
   9040 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9042 		new_readreg = wm_gmii_i82544_readreg;
   9043 		new_writereg = wm_gmii_i82544_writereg;
   9044 	} else {
   9045 		new_readreg = wm_gmii_i82543_readreg;
   9046 		new_writereg = wm_gmii_i82543_writereg;
   9047 	}
   9048 
   9049 	if (new_phytype == WMPHY_BM) {
   9050 		/* All BM use _bm_ */
   9051 		new_readreg = wm_gmii_bm_readreg;
   9052 		new_writereg = wm_gmii_bm_writereg;
   9053 	}
   9054 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9055 		/* All PCH* use _hv_ */
   9056 		new_readreg = wm_gmii_hv_readreg;
   9057 		new_writereg = wm_gmii_hv_writereg;
   9058 	}
   9059 
   9060 	/* Diag output */
   9061 	if (doubt_phytype != WMPHY_UNKNOWN)
   9062 		aprint_error_dev(dev, "Assumed new PHY type was "
   9063 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9064 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev,
		    "Previously assumed PHY type (%u) was incorrect. "
		    "New PHY type = %u\n", sc->sc_phytype, new_phytype);
   9070 
   9071 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9072 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9073 
   9074 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9075 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9076 		    "function was incorrect.\n");
   9077 
   9078 	/* Update now */
   9079 	sc->sc_phytype = new_phytype;
   9080 	mii->mii_readreg = new_readreg;
   9081 	mii->mii_writereg = new_writereg;
   9082 }
   9083 
   9084 /*
   9085  * wm_get_phy_id_82575:
   9086  *
   9087  * Return PHY ID. Return -1 if it failed.
   9088  */
   9089 static int
   9090 wm_get_phy_id_82575(struct wm_softc *sc)
   9091 {
   9092 	uint32_t reg;
   9093 	int phyid = -1;
   9094 
   9095 	/* XXX */
   9096 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9097 		return -1;
   9098 
   9099 	if (wm_sgmii_uses_mdio(sc)) {
   9100 		switch (sc->sc_type) {
   9101 		case WM_T_82575:
   9102 		case WM_T_82576:
   9103 			reg = CSR_READ(sc, WMREG_MDIC);
   9104 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9105 			break;
   9106 		case WM_T_82580:
   9107 		case WM_T_I350:
   9108 		case WM_T_I354:
   9109 		case WM_T_I210:
   9110 		case WM_T_I211:
   9111 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9112 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9113 			break;
   9114 		default:
   9115 			return -1;
   9116 		}
   9117 	}
   9118 
   9119 	return phyid;
   9120 }
   9121 
   9122 
   9123 /*
   9124  * wm_gmii_mediainit:
   9125  *
   9126  *	Initialize media for use on 1000BASE-T devices.
   9127  */
   9128 static void
   9129 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9130 {
   9131 	device_t dev = sc->sc_dev;
   9132 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9133 	struct mii_data *mii = &sc->sc_mii;
   9134 	uint32_t reg;
   9135 
   9136 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9137 		device_xname(sc->sc_dev), __func__));
   9138 
   9139 	/* We have GMII. */
   9140 	sc->sc_flags |= WM_F_HAS_MII;
   9141 
   9142 	if (sc->sc_type == WM_T_80003)
   9143 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9144 	else
   9145 		sc->sc_tipg = TIPG_1000T_DFLT;
   9146 
   9147 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9148 	if ((sc->sc_type == WM_T_82580)
   9149 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9150 	    || (sc->sc_type == WM_T_I211)) {
   9151 		reg = CSR_READ(sc, WMREG_PHPM);
   9152 		reg &= ~PHPM_GO_LINK_D;
   9153 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9154 	}
   9155 
   9156 	/*
   9157 	 * Let the chip set speed/duplex on its own based on
   9158 	 * signals from the PHY.
   9159 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9160 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9161 	 */
   9162 	sc->sc_ctrl |= CTRL_SLU;
   9163 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9164 
   9165 	/* Initialize our media structures and probe the GMII. */
   9166 	mii->mii_ifp = ifp;
   9167 
	/*
	 * The first call of wm_gmii_setup_phytype. The result might be
	 * incorrect.
	 */
   9172 	wm_gmii_setup_phytype(sc, 0, 0);
   9173 
   9174 	mii->mii_statchg = wm_gmii_statchg;
   9175 
   9176 	/* get PHY control from SMBus to PCIe */
   9177 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9178 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9179 		wm_smbustopci(sc);
   9180 
   9181 	wm_gmii_reset(sc);
   9182 
   9183 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9184 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9185 	    wm_gmii_mediastatus);
   9186 
   9187 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9188 	    || (sc->sc_type == WM_T_82580)
   9189 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9190 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9191 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9192 			/* Attach only one port */
   9193 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9194 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9195 		} else {
   9196 			int i, id;
   9197 			uint32_t ctrl_ext;
   9198 
   9199 			id = wm_get_phy_id_82575(sc);
   9200 			if (id != -1) {
   9201 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9202 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9203 			}
   9204 			if ((id == -1)
   9205 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9206 				/* Power on sgmii phy if it is disabled */
   9207 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9208 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9209 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9210 				CSR_WRITE_FLUSH(sc);
   9211 				delay(300*1000); /* XXX too long */
   9212 
				/* Try PHY addresses 1 through 7 */
   9214 				for (i = 1; i < 8; i++)
   9215 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9216 					    0xffffffff, i, MII_OFFSET_ANY,
   9217 					    MIIF_DOPAUSE);
   9218 
   9219 				/* restore previous sfp cage power state */
   9220 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9221 			}
   9222 		}
   9223 	} else {
   9224 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9225 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9226 	}
   9227 
   9228 	/*
   9229 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9230 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9231 	 */
   9232 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9233 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9234 		wm_set_mdio_slow_mode_hv(sc);
   9235 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9236 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9237 	}
   9238 
   9239 	/*
   9240 	 * (For ICH8 variants)
   9241 	 * If PHY detection failed, use BM's r/w function and retry.
   9242 	 */
   9243 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9244 		/* if failed, retry with *_bm_* */
   9245 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9246 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9247 		    sc->sc_phytype);
   9248 		sc->sc_phytype = WMPHY_BM;
   9249 		mii->mii_readreg = wm_gmii_bm_readreg;
   9250 		mii->mii_writereg = wm_gmii_bm_writereg;
   9251 
   9252 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9253 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9254 	}
   9255 
   9256 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   9258 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9259 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9260 		sc->sc_phytype = WMPHY_NONE;
   9261 	} else {
   9262 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9263 
		/*
		 * PHY found!  Check the PHY type again with the second call
		 * of wm_gmii_setup_phytype.
		 */
   9268 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9269 		    child->mii_mpd_model);
   9270 
   9271 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9272 	}
   9273 }
   9274 
   9275 /*
   9276  * wm_gmii_mediachange:	[ifmedia interface function]
   9277  *
   9278  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9279  */
   9280 static int
   9281 wm_gmii_mediachange(struct ifnet *ifp)
   9282 {
   9283 	struct wm_softc *sc = ifp->if_softc;
   9284 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9285 	int rc;
   9286 
   9287 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9288 		device_xname(sc->sc_dev), __func__));
   9289 	if ((ifp->if_flags & IFF_UP) == 0)
   9290 		return 0;
   9291 
   9292 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9293 	sc->sc_ctrl |= CTRL_SLU;
   9294 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9295 	    || (sc->sc_type > WM_T_82543)) {
   9296 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9297 	} else {
   9298 		sc->sc_ctrl &= ~CTRL_ASDE;
   9299 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9300 		if (ife->ifm_media & IFM_FDX)
   9301 			sc->sc_ctrl |= CTRL_FD;
   9302 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9303 		case IFM_10_T:
   9304 			sc->sc_ctrl |= CTRL_SPEED_10;
   9305 			break;
   9306 		case IFM_100_TX:
   9307 			sc->sc_ctrl |= CTRL_SPEED_100;
   9308 			break;
   9309 		case IFM_1000_T:
   9310 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9311 			break;
   9312 		default:
   9313 			panic("wm_gmii_mediachange: bad media 0x%x",
   9314 			    ife->ifm_media);
   9315 		}
   9316 	}
   9317 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9318 	if (sc->sc_type <= WM_T_82543)
   9319 		wm_gmii_reset(sc);
   9320 
   9321 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9322 		return 0;
   9323 	return rc;
   9324 }
   9325 
   9326 /*
   9327  * wm_gmii_mediastatus:	[ifmedia interface function]
   9328  *
   9329  *	Get the current interface media status on a 1000BASE-T device.
   9330  */
   9331 static void
   9332 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9333 {
   9334 	struct wm_softc *sc = ifp->if_softc;
   9335 
   9336 	ether_mediastatus(ifp, ifmr);
   9337 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9338 	    | sc->sc_flowflags;
   9339 }
   9340 
   9341 #define	MDI_IO		CTRL_SWDPIN(2)
   9342 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9343 #define	MDI_CLK		CTRL_SWDPIN(3)
   9344 
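/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock out the given bits, MSB first, on the bit-banged MII
 *	management interface.  The i82543 accesses its PHY through
 *	software-controlled pins (MDI_IO, MDI_CLK) in the CTRL register
 *	instead of the MDIC register.
 */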
   9345 static void
   9346 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9347 {
   9348 	uint32_t i, v;
   9349 
   9350 	v = CSR_READ(sc, WMREG_CTRL);
   9351 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9352 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9353 
   9354 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9355 		if (data & i)
   9356 			v |= MDI_IO;
   9357 		else
   9358 			v &= ~MDI_IO;
   9359 		CSR_WRITE(sc, WMREG_CTRL, v);
   9360 		CSR_WRITE_FLUSH(sc);
   9361 		delay(10);
   9362 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9363 		CSR_WRITE_FLUSH(sc);
   9364 		delay(10);
   9365 		CSR_WRITE(sc, WMREG_CTRL, v);
   9366 		CSR_WRITE_FLUSH(sc);
   9367 		delay(10);
   9368 	}
   9369 }
   9370 
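/*
 * wm_i82543_mii_recvbits:
 *
 *	Turn the MDIO line around and clock in the 16-bit data field of
 *	an MII management read frame.
 */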
   9371 static uint32_t
   9372 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9373 {
   9374 	uint32_t v, i, data = 0;
   9375 
   9376 	v = CSR_READ(sc, WMREG_CTRL);
   9377 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9378 	v |= CTRL_SWDPIO(3);
   9379 
   9380 	CSR_WRITE(sc, WMREG_CTRL, v);
   9381 	CSR_WRITE_FLUSH(sc);
   9382 	delay(10);
   9383 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9384 	CSR_WRITE_FLUSH(sc);
   9385 	delay(10);
   9386 	CSR_WRITE(sc, WMREG_CTRL, v);
   9387 	CSR_WRITE_FLUSH(sc);
   9388 	delay(10);
   9389 
   9390 	for (i = 0; i < 16; i++) {
   9391 		data <<= 1;
   9392 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9393 		CSR_WRITE_FLUSH(sc);
   9394 		delay(10);
   9395 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9396 			data |= 1;
   9397 		CSR_WRITE(sc, WMREG_CTRL, v);
   9398 		CSR_WRITE_FLUSH(sc);
   9399 		delay(10);
   9400 	}
   9401 
   9402 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9403 	CSR_WRITE_FLUSH(sc);
   9404 	delay(10);
   9405 	CSR_WRITE(sc, WMREG_CTRL, v);
   9406 	CSR_WRITE_FLUSH(sc);
   9407 	delay(10);
   9408 
   9409 	return data;
   9410 }
   9411 
   9412 #undef MDI_IO
   9413 #undef MDI_DIR
   9414 #undef MDI_CLK
   9415 
   9416 /*
   9417  * wm_gmii_i82543_readreg:	[mii interface function]
   9418  *
   9419  *	Read a PHY register on the GMII (i82543 version).
   9420  */
   9421 static int
   9422 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9423 {
   9424 	struct wm_softc *sc = device_private(self);
   9425 	int rv;
   9426 
   9427 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9428 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9429 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9430 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9431 
   9432 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9433 	    device_xname(sc->sc_dev), phy, reg, rv));
   9434 
   9435 	return rv;
   9436 }
   9437 
   9438 /*
   9439  * wm_gmii_i82543_writereg:	[mii interface function]
   9440  *
   9441  *	Write a PHY register on the GMII (i82543 version).
   9442  */
   9443 static void
   9444 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9445 {
   9446 	struct wm_softc *sc = device_private(self);
   9447 
   9448 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9449 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9450 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9451 	    (MII_COMMAND_START << 30), 32);
   9452 }
   9453 
   9454 /*
   9455  * wm_gmii_mdic_readreg:	[mii interface function]
   9456  *
   9457  *	Read a PHY register on the GMII.
   9458  */
   9459 static int
   9460 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9461 {
   9462 	struct wm_softc *sc = device_private(self);
   9463 	uint32_t mdic = 0;
   9464 	int i, rv;
   9465 
   9466 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9467 	    MDIC_REGADD(reg));
   9468 
   9469 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9470 		mdic = CSR_READ(sc, WMREG_MDIC);
   9471 		if (mdic & MDIC_READY)
   9472 			break;
   9473 		delay(50);
   9474 	}
   9475 
   9476 	if ((mdic & MDIC_READY) == 0) {
   9477 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9478 		    device_xname(sc->sc_dev), phy, reg);
   9479 		rv = 0;
   9480 	} else if (mdic & MDIC_E) {
   9481 #if 0 /* This is normal if no PHY is present. */
   9482 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9483 		    device_xname(sc->sc_dev), phy, reg);
   9484 #endif
   9485 		rv = 0;
   9486 	} else {
   9487 		rv = MDIC_DATA(mdic);
   9488 		if (rv == 0xffff)
   9489 			rv = 0;
   9490 	}
   9491 
   9492 	return rv;
   9493 }
   9494 
   9495 /*
   9496  * wm_gmii_mdic_writereg:	[mii interface function]
   9497  *
   9498  *	Write a PHY register on the GMII.
   9499  */
   9500 static void
   9501 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9502 {
   9503 	struct wm_softc *sc = device_private(self);
   9504 	uint32_t mdic = 0;
   9505 	int i;
   9506 
   9507 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9508 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9509 
   9510 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9511 		mdic = CSR_READ(sc, WMREG_MDIC);
   9512 		if (mdic & MDIC_READY)
   9513 			break;
   9514 		delay(50);
   9515 	}
   9516 
   9517 	if ((mdic & MDIC_READY) == 0)
   9518 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9519 		    device_xname(sc->sc_dev), phy, reg);
   9520 	else if (mdic & MDIC_E)
   9521 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9522 		    device_xname(sc->sc_dev), phy, reg);
   9523 }
   9524 
   9525 /*
   9526  * wm_gmii_i82544_readreg:	[mii interface function]
   9527  *
   9528  *	Read a PHY register on the GMII.
   9529  */
   9530 static int
   9531 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9532 {
   9533 	struct wm_softc *sc = device_private(self);
   9534 	int rv;
   9535 
   9536 	if (sc->phy.acquire(sc)) {
   9537 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9538 		    __func__);
   9539 		return 0;
   9540 	}
   9541 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9542 	sc->phy.release(sc);
   9543 
   9544 	return rv;
   9545 }
   9546 
   9547 /*
   9548  * wm_gmii_i82544_writereg:	[mii interface function]
   9549  *
   9550  *	Write a PHY register on the GMII.
   9551  */
   9552 static void
   9553 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9554 {
   9555 	struct wm_softc *sc = device_private(self);
   9556 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
   9561 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9562 	sc->phy.release(sc);
   9563 }
   9564 
   9565 /*
   9566  * wm_gmii_i80003_readreg:	[mii interface function]
   9567  *
 *	Read a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9571  */
   9572 static int
   9573 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9574 {
   9575 	struct wm_softc *sc = device_private(self);
   9576 	int rv;
   9577 
   9578 	if (phy != 1) /* only one PHY on kumeran bus */
   9579 		return 0;
   9580 
   9581 	if (sc->phy.acquire(sc)) {
   9582 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9583 		    __func__);
   9584 		return 0;
   9585 	}
   9586 
   9587 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9588 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9589 		    reg >> GG82563_PAGE_SHIFT);
   9590 	} else {
   9591 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9592 		    reg >> GG82563_PAGE_SHIFT);
   9593 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9595 	delay(200);
   9596 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9597 	delay(200);
   9598 	sc->phy.release(sc);
   9599 
   9600 	return rv;
   9601 }
   9602 
   9603 /*
   9604  * wm_gmii_i80003_writereg:	[mii interface function]
   9605  *
   9606  *	Write a PHY register on the Kumeran interface.
   9607  * This could be handled by the PHY layer if we didn't have to lock the
   9608  * resource ...
   9609  */
   9610 static void
   9611 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9612 {
   9613 	struct wm_softc *sc = device_private(self);
   9614 
   9615 	if (phy != 1) /* only one PHY on kumeran bus */
   9616 		return;
   9617 
   9618 	if (sc->phy.acquire(sc)) {
   9619 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9620 		    __func__);
   9621 		return;
   9622 	}
   9623 
   9624 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9625 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9626 		    reg >> GG82563_PAGE_SHIFT);
   9627 	} else {
   9628 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9629 		    reg >> GG82563_PAGE_SHIFT);
   9630 	}
   9631 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   9632 	delay(200);
   9633 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9634 	delay(200);
   9635 
   9636 	sc->phy.release(sc);
   9637 }
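
        /*
         * A note on the GG82563 accessors above: "reg" packs both a page
         * and a register offset, roughly page = reg >> GG82563_PAGE_SHIFT
         * and offset = reg & MII_ADDRMASK.  Offsets below
         * GG82563_MIN_ALT_REG select the page through
         * GG82563_PHY_PAGE_SELECT; higher offsets must use the alternate
         * page select register, which is why both branches exist above.
         */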
   9638 
   9639 /*
   9640  * wm_gmii_bm_readreg:	[mii interface function]
   9641  *
   9642  *	Read a PHY register on the BM PHY.
   9643  * This could be handled by the PHY layer if we didn't have to lock the
   9644  * resource ...
   9645  */
   9646 static int
   9647 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9648 {
   9649 	struct wm_softc *sc = device_private(self);
   9650 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9651 	uint16_t val;
   9652 	int rv;
   9653 
   9654 	if (sc->phy.acquire(sc)) {
   9655 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9656 		    __func__);
   9657 		return 0;
   9658 	}
   9659 
   9660 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9661 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9662 		    || (reg == 31)) ? 1 : phy;
   9663 	/* Page 800 works differently than the rest so it has its own func */
   9664 	if (page == BM_WUC_PAGE) {
   9665 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9666 		rv = val;
   9667 		goto release;
   9668 	}
   9669 
   9670 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9671 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9672 		    && (sc->sc_type != WM_T_82583))
   9673 			wm_gmii_mdic_writereg(self, phy,
   9674 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9675 		else
   9676 			wm_gmii_mdic_writereg(self, phy,
   9677 			    BME1000_PHY_PAGE_SELECT, page);
   9678 	}
   9679 
   9680 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9681 
   9682 release:
   9683 	sc->phy.release(sc);
   9684 	return rv;
   9685 }
   9686 
   9687 /*
   9688  * wm_gmii_bm_writereg:	[mii interface function]
   9689  *
   9690  *	Write a PHY register on the BM PHY.
   9691  * This could be handled by the PHY layer if we didn't have to lock the
   9692  * resource ...
   9693  */
   9694 static void
   9695 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9696 {
   9697 	struct wm_softc *sc = device_private(self);
   9698 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9699 
   9700 	if (sc->phy.acquire(sc)) {
   9701 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9702 		    __func__);
   9703 		return;
   9704 	}
   9705 
   9706 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9707 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9708 		    || (reg == 31)) ? 1 : phy;
   9709 	/* Page 800 works differently than the rest so it has its own func */
   9710 	if (page == BM_WUC_PAGE) {
   9711 		uint16_t tmp;
   9712 
   9713 		tmp = val;
   9714 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9715 		goto release;
   9716 	}
   9717 
   9718 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9719 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9720 		    && (sc->sc_type != WM_T_82583))
   9721 			wm_gmii_mdic_writereg(self, phy,
   9722 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9723 		else
   9724 			wm_gmii_mdic_writereg(self, phy,
   9725 			    BME1000_PHY_PAGE_SELECT, page);
   9726 	}
   9727 
   9728 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9729 
   9730 release:
   9731 	sc->phy.release(sc);
   9732 }
   9733 
   9734 static void
   9735 wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
   9736 {
   9737 	struct wm_softc *sc = device_private(self);
   9738 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9739 	uint16_t wuce, reg;
   9740 
   9741 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9742 		device_xname(sc->sc_dev), __func__));
   9743 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9744 	if (sc->sc_type == WM_T_PCH) {
   9745 		/* XXX The e1000 driver does nothing here... why? */
   9746 	}
   9747 
   9748 	/*
   9749 	 * 1) Enable PHY wakeup register first.
   9750 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9751 	 */
   9752 
   9753 	/* Set page 769 */
   9754 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9755 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9756 
   9757 	/* Read WUCE and save it */
   9758 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9759 
   9760 	reg = wuce | BM_WUC_ENABLE_BIT;
   9761 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9762 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9763 
   9764 	/* Select page 800 */
   9765 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9766 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9767 
   9768 	/*
   9769 	 * 2) Access PHY wakeup register.
   9770 	 * See e1000_access_phy_wakeup_reg_bm.
   9771 	 */
   9772 
   9773 	/* Write page 800 */
   9774 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9775 
   9776 	if (rd)
   9777 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9778 	else
   9779 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9780 
   9781 	/*
   9782 	 * 3) Disable PHY wakeup register.
   9783 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9784 	 */
   9785 	/* Set page 769 */
   9786 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9787 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9788 
   9789 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9790 }
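
        /*
         * To summarize the sequence above: wakeup registers are reached by
         * (1) selecting page 769 (BM_WUC_ENABLE_PAGE) and setting
         * BM_WUC_ENABLE_BIT in BM_WUC_ENABLE_REG, (2) selecting page 800
         * (BM_WUC_PAGE) and moving the register number and the data through
         * BM_WUC_ADDRESS_OPCODE / BM_WUC_DATA_OPCODE, and (3) restoring the
         * saved WUCE value back on page 769.  All of it goes through PHY
         * address 1.
         */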
   9791 
   9792 /*
   9793  * wm_gmii_hv_readreg:	[mii interface function]
   9794  *
   9795  *	Read a PHY register on the HV (PCH and newer) PHY.
   9796  * This could be handled by the PHY layer if we didn't have to lock the
   9797  * resource ...
   9798  */
   9799 static int
   9800 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9801 {
   9802 	struct wm_softc *sc = device_private(self);
   9803 	int rv;
   9804 
   9805 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9806 		device_xname(sc->sc_dev), __func__));
   9807 	if (sc->phy.acquire(sc)) {
   9808 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9809 		    __func__);
   9810 		return 0;
   9811 	}
   9812 
   9813 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9814 	sc->phy.release(sc);
   9815 	return rv;
   9816 }
   9817 
   9818 static int
   9819 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9820 {
   9821 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9822 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9823 	uint16_t val;
   9824 	int rv;
   9825 
   9826 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9827 
   9828 	/* Page 800 works differently than the rest so it has its own func */
   9829 	if (page == BM_WUC_PAGE) {
   9830 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9831 		return val;
   9832 	}
   9833 
   9834 	/*
   9835 	 * Pages lower than 768 work differently than the rest and would
   9836 	 * need their own function; they are not handled here.
   9837 	 */
   9838 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9839 		printf("gmii_hv_readreg!!!\n");
   9840 		return 0;
   9841 	}
   9842 
   9843 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9844 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9845 		    page << BME1000_PAGE_SHIFT);
   9846 	}
   9847 
   9848 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9849 	return rv;
   9850 }
   9851 
   9852 /*
   9853  * wm_gmii_hv_writereg:	[mii interface function]
   9854  *
   9855  *	Write a PHY register on the HV (PCH and newer) PHY.
   9856  * This could be handled by the PHY layer if we didn't have to lock the
   9857  * resource ...
   9858  */
   9859 static void
   9860 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9861 {
   9862 	struct wm_softc *sc = device_private(self);
   9863 
   9864 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9865 		device_xname(sc->sc_dev), __func__));
   9866 
   9867 	if (sc->phy.acquire(sc)) {
   9868 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9869 		    __func__);
   9870 		return;
   9871 	}
   9872 
   9873 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9874 	sc->phy.release(sc);
   9875 }
   9876 
   9877 static void
   9878 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9879 {
   9880 	struct wm_softc *sc = device_private(self);
   9881 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9882 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9883 
   9884 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9885 
   9886 	/* Page 800 works differently than the rest so it has its own func */
   9887 	if (page == BM_WUC_PAGE) {
   9888 		uint16_t tmp;
   9889 
   9890 		tmp = val;
   9891 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9892 		return;
   9893 	}
   9894 
   9895 	/*
   9896 	 * Pages lower than 768 work differently than the rest and would
   9897 	 * need their own function; they are not handled here.
   9898 	 */
   9899 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9900 		printf("gmii_hv_writereg!!!\n");
   9901 		return;
   9902 	}
   9903 
   9904 	{
   9905 		/*
   9906 		 * XXX Workaround MDIO accesses being disabled after entering
   9907 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9908 		 * register is set)
   9909 		 */
   9910 		if (sc->sc_phytype == WMPHY_82578) {
   9911 			struct mii_softc *child;
   9912 
   9913 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9914 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9915 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9916 			    && ((val & (1 << 11)) != 0)) {
   9917 				printf("XXX need workaround\n");
   9918 			}
   9919 		}
   9920 
   9921 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9922 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9923 			    page << BME1000_PAGE_SHIFT);
   9924 		}
   9925 	}
   9926 
   9927 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9928 }
   9929 
   9930 /*
   9931  * wm_gmii_82580_readreg:	[mii interface function]
   9932  *
   9933  *	Read a PHY register on the 82580 and I350.
   9934  * This could be handled by the PHY layer if we didn't have to lock the
   9935  * resource ...
   9936  */
   9937 static int
   9938 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9939 {
   9940 	struct wm_softc *sc = device_private(self);
   9941 	int rv;
   9942 
   9943 	if (sc->phy.acquire(sc) != 0) {
   9944 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9945 		    __func__);
   9946 		return 0;
   9947 	}
   9948 
   9949 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9950 
   9951 	sc->phy.release(sc);
   9952 	return rv;
   9953 }
   9954 
   9955 /*
   9956  * wm_gmii_82580_writereg:	[mii interface function]
   9957  *
   9958  *	Write a PHY register on the 82580 and I350.
   9959  * This could be handled by the PHY layer if we didn't have to lock the
   9960  * resource ...
   9961  */
   9962 static void
   9963 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9964 {
   9965 	struct wm_softc *sc = device_private(self);
   9966 
   9967 	if (sc->phy.acquire(sc) != 0) {
   9968 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9969 		    __func__);
   9970 		return;
   9971 	}
   9972 
   9973 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9974 
   9975 	sc->phy.release(sc);
   9976 }
   9977 
   9978 /*
   9979  * wm_gmii_gs40g_readreg:	[mii interface function]
   9980  *
   9981  *	Read a PHY register on the I210 and I211.
   9982  * This could be handled by the PHY layer if we didn't have to lock the
   9983  * resource ...
   9984  */
   9985 static int
   9986 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9987 {
   9988 	struct wm_softc *sc = device_private(self);
   9989 	int page, offset;
   9990 	int rv;
   9991 
   9992 	/* Acquire semaphore */
   9993 	if (sc->phy.acquire(sc)) {
   9994 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9995 		    __func__);
   9996 		return 0;
   9997 	}
   9998 
   9999 	/* Page select */
   10000 	page = reg >> GS40G_PAGE_SHIFT;
   10001 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10002 
   10003 	/* Read reg */
   10004 	offset = reg & GS40G_OFFSET_MASK;
   10005 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10006 
   10007 	sc->phy.release(sc);
   10008 	return rv;
   10009 }
   10010 
   10011 /*
   10012  * wm_gmii_gs40g_writereg:	[mii interface function]
   10013  *
   10014  *	Write a PHY register on the I210 and I211.
   10015  * This could be handled by the PHY layer if we didn't have to lock the
  10016  * resource ...
   10017  */
   10018 static void
   10019 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10020 {
   10021 	struct wm_softc *sc = device_private(self);
   10022 	int page, offset;
   10023 
   10024 	/* Acquire semaphore */
   10025 	if (sc->phy.acquire(sc)) {
   10026 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10027 		    __func__);
   10028 		return;
   10029 	}
   10030 
   10031 	/* Page select */
   10032 	page = reg >> GS40G_PAGE_SHIFT;
   10033 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10034 
   10035 	/* Write reg */
   10036 	offset = reg & GS40G_OFFSET_MASK;
   10037 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10038 
   10039 	/* Release semaphore */
   10040 	sc->phy.release(sc);
   10041 }
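
        /*
         * As with the other paged PHYs, a GS40G "reg" encodes page and
         * offset together.  A hypothetical caller wanting page 2,
         * register 21 would pass (2 << GS40G_PAGE_SHIFT) | 21, which the
         * accessors above split apart again with GS40G_PAGE_SHIFT and
         * GS40G_OFFSET_MASK.
         */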
   10042 
   10043 /*
   10044  * wm_gmii_statchg:	[mii interface function]
   10045  *
   10046  *	Callback from MII layer when media changes.
   10047  */
   10048 static void
   10049 wm_gmii_statchg(struct ifnet *ifp)
   10050 {
   10051 	struct wm_softc *sc = ifp->if_softc;
   10052 	struct mii_data *mii = &sc->sc_mii;
   10053 
   10054 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10055 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10056 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10057 
   10058 	/*
   10059 	 * Get flow control negotiation result.
   10060 	 */
   10061 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10062 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10063 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10064 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10065 	}
   10066 
   10067 	if (sc->sc_flowflags & IFM_FLOW) {
   10068 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10069 			sc->sc_ctrl |= CTRL_TFCE;
   10070 			sc->sc_fcrtl |= FCRTL_XONE;
   10071 		}
   10072 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10073 			sc->sc_ctrl |= CTRL_RFCE;
   10074 	}
   10075 
   10076 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10077 		DPRINTF(WM_DEBUG_LINK,
   10078 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10079 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10080 	} else {
   10081 		DPRINTF(WM_DEBUG_LINK,
   10082 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10083 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10084 	}
   10085 
   10086 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10087 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10088 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10089 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10090 	if (sc->sc_type == WM_T_80003) {
   10091 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10092 		case IFM_1000_T:
   10093 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10094 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10095 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10096 			break;
   10097 		default:
   10098 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10099 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10100 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10101 			break;
   10102 		}
   10103 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10104 	}
   10105 }
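
        /*
         * The flow control mapping applied above: IFM_ETH_TXPAUSE asserts
         * CTRL_TFCE (we transmit PAUSE frames) and enables XON through
         * FCRTL_XONE, while IFM_ETH_RXPAUSE asserts CTRL_RFCE (we honor
         * received PAUSE frames).
         */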
   10106 
   10107 /* kumeran related (80003, ICH* and PCH*) */
   10108 
   10109 /*
   10110  * wm_kmrn_readreg:
   10111  *
   10112  *	Read a kumeran register
   10113  */
   10114 static int
   10115 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10116 {
   10117 	int rv;
   10118 
   10119 	if (sc->sc_type == WM_T_80003)
   10120 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10121 	else
   10122 		rv = sc->phy.acquire(sc);
   10123 	if (rv != 0) {
   10124 		aprint_error_dev(sc->sc_dev,
   10125 		    "%s: failed to get semaphore\n", __func__);
   10126 		return 0;
   10127 	}
   10128 
   10129 	rv = wm_kmrn_readreg_locked(sc, reg);
   10130 
   10131 	if (sc->sc_type == WM_T_80003)
   10132 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10133 	else
   10134 		sc->phy.release(sc);
   10135 
   10136 	return rv;
   10137 }
   10138 
   10139 static int
   10140 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10141 {
   10142 	int rv;
   10143 
   10144 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10145 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10146 	    KUMCTRLSTA_REN);
   10147 	CSR_WRITE_FLUSH(sc);
   10148 	delay(2);
   10149 
   10150 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10151 
   10152 	return rv;
   10153 }
   10154 
   10155 /*
   10156  * wm_kmrn_writereg:
   10157  *
   10158  *	Write a kumeran register
   10159  */
   10160 static void
   10161 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10162 {
   10163 	int rv;
   10164 
   10165 	if (sc->sc_type == WM_T_80003)
   10166 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10167 	else
   10168 		rv = sc->phy.acquire(sc);
   10169 	if (rv != 0) {
   10170 		aprint_error_dev(sc->sc_dev,
   10171 		    "%s: failed to get semaphore\n", __func__);
   10172 		return;
   10173 	}
   10174 
   10175 	wm_kmrn_writereg_locked(sc, reg, val);
   10176 
   10177 	if (sc->sc_type == WM_T_80003)
   10178 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10179 	else
   10180 		sc->phy.release(sc);
   10181 }
   10182 
   10183 static void
   10184 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10185 {
   10186 
   10187 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10188 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10189 	    (val & KUMCTRLSTA_MASK));
   10190 }
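
        /*
         * All Kumeran accesses funnel through the single KUMCTRLSTA
         * register: the offset goes into the KUMCTRLSTA_OFFSET field,
         * KUMCTRLSTA_REN requests a read and the 16 data bits live in
         * KUMCTRLSTA_MASK, so a read is "write offset | REN, flush, wait
         * 2us, read the data back", as wm_kmrn_readreg_locked() does.
         */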
   10191 
   10192 /* SGMII related */
   10193 
   10194 /*
   10195  * wm_sgmii_uses_mdio
   10196  *
   10197  * Check whether the transaction is to the internal PHY or the external
   10198  * MDIO interface. Return true if it's MDIO.
   10199  */
   10200 static bool
   10201 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10202 {
   10203 	uint32_t reg;
   10204 	bool ismdio = false;
   10205 
   10206 	switch (sc->sc_type) {
   10207 	case WM_T_82575:
   10208 	case WM_T_82576:
   10209 		reg = CSR_READ(sc, WMREG_MDIC);
   10210 		ismdio = ((reg & MDIC_DEST) != 0);
   10211 		break;
   10212 	case WM_T_82580:
   10213 	case WM_T_I350:
   10214 	case WM_T_I354:
   10215 	case WM_T_I210:
   10216 	case WM_T_I211:
   10217 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10218 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10219 		break;
   10220 	default:
   10221 		break;
   10222 	}
   10223 
   10224 	return ismdio;
   10225 }
   10226 
   10227 /*
   10228  * wm_sgmii_readreg:	[mii interface function]
   10229  *
   10230  *	Read a PHY register on the SGMII
   10231  * This could be handled by the PHY layer if we didn't have to lock the
  10232  * resource ...
   10233  */
   10234 static int
   10235 wm_sgmii_readreg(device_t self, int phy, int reg)
   10236 {
   10237 	struct wm_softc *sc = device_private(self);
   10238 	uint32_t i2ccmd;
   10239 	int i, rv;
   10240 
   10241 	if (sc->phy.acquire(sc)) {
   10242 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10243 		    __func__);
   10244 		return 0;
   10245 	}
   10246 
   10247 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10248 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10249 	    | I2CCMD_OPCODE_READ;
   10250 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10251 
   10252 	/* Poll the ready bit */
   10253 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10254 		delay(50);
   10255 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10256 		if (i2ccmd & I2CCMD_READY)
   10257 			break;
   10258 	}
   10259 	if ((i2ccmd & I2CCMD_READY) == 0)
   10260 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10261 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10262 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10263 
   10264 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10265 
   10266 	sc->phy.release(sc);
   10267 	return rv;
   10268 }
   10269 
   10270 /*
   10271  * wm_sgmii_writereg:	[mii interface function]
   10272  *
   10273  *	Write a PHY register on the SGMII.
   10274  * This could be handled by the PHY layer if we didn't have to lock the
  10275  * resource ...
   10276  */
   10277 static void
   10278 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10279 {
   10280 	struct wm_softc *sc = device_private(self);
   10281 	uint32_t i2ccmd;
   10282 	int i;
   10283 	int val_swapped;
   10284 
   10285 	if (sc->phy.acquire(sc) != 0) {
   10286 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10287 		    __func__);
   10288 		return;
   10289 	}
   10290 	/* Swap the data bytes for the I2C interface */
   10291 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10292 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10293 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10294 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10295 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10296 
   10297 	/* Poll the ready bit */
   10298 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10299 		delay(50);
   10300 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10301 		if (i2ccmd & I2CCMD_READY)
   10302 			break;
   10303 	}
   10304 	if ((i2ccmd & I2CCMD_READY) == 0)
   10305 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10306 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10307 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10308 
   10309 	sc->phy.release(sc);
   10310 }
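
        /*
         * The byte swaps above exist because the I2CCMD data field carries
         * the two register bytes in wire (big-endian) order while the
         * driver works with host 16-bit values: a register value of 0x1234
         * is written as 0x3412 into I2CCMD, and a read returning 0x3412
         * yields 0x1234.
         */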
   10311 
   10312 /* TBI related */
   10313 
   10314 /*
   10315  * wm_tbi_mediainit:
   10316  *
   10317  *	Initialize media for use on 1000BASE-X devices.
   10318  */
   10319 static void
   10320 wm_tbi_mediainit(struct wm_softc *sc)
   10321 {
   10322 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10323 	const char *sep = "";
   10324 
   10325 	if (sc->sc_type < WM_T_82543)
   10326 		sc->sc_tipg = TIPG_WM_DFLT;
   10327 	else
   10328 		sc->sc_tipg = TIPG_LG_DFLT;
   10329 
   10330 	sc->sc_tbi_serdes_anegticks = 5;
   10331 
   10332 	/* Initialize our media structures */
   10333 	sc->sc_mii.mii_ifp = ifp;
   10334 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10335 
   10336 	if ((sc->sc_type >= WM_T_82575)
   10337 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10338 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10339 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10340 	else
   10341 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10342 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10343 
   10344 	/*
   10345 	 * SWD Pins:
   10346 	 *
   10347 	 *	0 = Link LED (output)
   10348 	 *	1 = Loss Of Signal (input)
   10349 	 */
   10350 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10351 
   10352 	/* XXX Perhaps this is only for TBI */
   10353 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10354 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10355 
   10356 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10357 		sc->sc_ctrl &= ~CTRL_LRST;
   10358 
   10359 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10360 
   10361 #define	ADD(ss, mm, dd)							\
   10362 do {									\
   10363 	aprint_normal("%s%s", sep, ss);					\
   10364 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10365 	sep = ", ";							\
   10366 } while (/*CONSTCOND*/0)
   10367 
   10368 	aprint_normal_dev(sc->sc_dev, "");
   10369 
   10370 	if (sc->sc_type == WM_T_I354) {
   10371 		uint32_t status;
   10372 
   10373 		status = CSR_READ(sc, WMREG_STATUS);
   10374 		if (((status & STATUS_2P5_SKU) != 0)
   10375 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10376 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   10377 		} else
   10378 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   10379 	} else if (sc->sc_type == WM_T_82545) {
   10380 		/* Only 82545 is LX (XXX except SFP) */
   10381 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10382 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10383 	} else {
   10384 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10385 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10386 	}
   10387 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10388 	aprint_normal("\n");
   10389 
   10390 #undef ADD
   10391 
   10392 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10393 }
   10394 
   10395 /*
   10396  * wm_tbi_mediachange:	[ifmedia interface function]
   10397  *
   10398  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10399  */
   10400 static int
   10401 wm_tbi_mediachange(struct ifnet *ifp)
   10402 {
   10403 	struct wm_softc *sc = ifp->if_softc;
   10404 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10405 	uint32_t status;
   10406 	int i;
   10407 
   10408 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10409 		/* XXX need some work for >= 82571 and < 82575 */
   10410 		if (sc->sc_type < WM_T_82575)
   10411 			return 0;
   10412 	}
   10413 
   10414 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10415 	    || (sc->sc_type >= WM_T_82575))
   10416 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10417 
   10418 	sc->sc_ctrl &= ~CTRL_LRST;
   10419 	sc->sc_txcw = TXCW_ANE;
   10420 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10421 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10422 	else if (ife->ifm_media & IFM_FDX)
   10423 		sc->sc_txcw |= TXCW_FD;
   10424 	else
   10425 		sc->sc_txcw |= TXCW_HD;
   10426 
   10427 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10428 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10429 
   10430 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10431 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10432 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10433 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10434 	CSR_WRITE_FLUSH(sc);
   10435 	delay(1000);
   10436 
   10437 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10438 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10439 
   10440 	/*
   10441 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   10442 	 * optics detect a signal, 0 if they don't.
   10443 	 */
   10444 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10445 		/* Have signal; wait for the link to come up. */
   10446 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10447 			delay(10000);
   10448 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10449 				break;
   10450 		}
   10451 
   10452 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10453 			    device_xname(sc->sc_dev),i));
   10454 
   10455 		status = CSR_READ(sc, WMREG_STATUS);
   10456 		DPRINTF(WM_DEBUG_LINK,
   10457 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10458 			device_xname(sc->sc_dev),status, STATUS_LU));
   10459 		if (status & STATUS_LU) {
   10460 			/* Link is up. */
   10461 			DPRINTF(WM_DEBUG_LINK,
   10462 			    ("%s: LINK: set media -> link up %s\n",
   10463 			    device_xname(sc->sc_dev),
   10464 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10465 
   10466 			/*
  10467 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
  10468 			 * automatically, so we must refresh the cached sc->sc_ctrl.
   10469 			 */
   10470 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10471 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10472 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10473 			if (status & STATUS_FD)
   10474 				sc->sc_tctl |=
   10475 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10476 			else
   10477 				sc->sc_tctl |=
   10478 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10479 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10480 				sc->sc_fcrtl |= FCRTL_XONE;
   10481 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10482 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10483 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10484 				      sc->sc_fcrtl);
   10485 			sc->sc_tbi_linkup = 1;
   10486 		} else {
   10487 			if (i == WM_LINKUP_TIMEOUT)
   10488 				wm_check_for_link(sc);
   10489 			/* Link is down. */
   10490 			DPRINTF(WM_DEBUG_LINK,
   10491 			    ("%s: LINK: set media -> link down\n",
   10492 			    device_xname(sc->sc_dev)));
   10493 			sc->sc_tbi_linkup = 0;
   10494 		}
   10495 	} else {
   10496 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10497 		    device_xname(sc->sc_dev)));
   10498 		sc->sc_tbi_linkup = 0;
   10499 	}
   10500 
   10501 	wm_tbi_serdes_set_linkled(sc);
   10502 
   10503 	return 0;
   10504 }
   10505 
   10506 /*
   10507  * wm_tbi_mediastatus:	[ifmedia interface function]
   10508  *
   10509  *	Get the current interface media status on a 1000BASE-X device.
   10510  */
   10511 static void
   10512 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10513 {
   10514 	struct wm_softc *sc = ifp->if_softc;
   10515 	uint32_t ctrl, status;
   10516 
   10517 	ifmr->ifm_status = IFM_AVALID;
   10518 	ifmr->ifm_active = IFM_ETHER;
   10519 
   10520 	status = CSR_READ(sc, WMREG_STATUS);
   10521 	if ((status & STATUS_LU) == 0) {
   10522 		ifmr->ifm_active |= IFM_NONE;
   10523 		return;
   10524 	}
   10525 
   10526 	ifmr->ifm_status |= IFM_ACTIVE;
   10527 	/* Only 82545 is LX */
   10528 	if (sc->sc_type == WM_T_82545)
   10529 		ifmr->ifm_active |= IFM_1000_LX;
   10530 	else
   10531 		ifmr->ifm_active |= IFM_1000_SX;
   10532 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10533 		ifmr->ifm_active |= IFM_FDX;
   10534 	else
   10535 		ifmr->ifm_active |= IFM_HDX;
   10536 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10537 	if (ctrl & CTRL_RFCE)
   10538 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10539 	if (ctrl & CTRL_TFCE)
   10540 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10541 }
   10542 
   10543 /* XXX TBI only */
   10544 static int
   10545 wm_check_for_link(struct wm_softc *sc)
   10546 {
   10547 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10548 	uint32_t rxcw;
   10549 	uint32_t ctrl;
   10550 	uint32_t status;
   10551 	uint32_t sig;
   10552 
   10553 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10554 		/* XXX need some work for >= 82571 */
   10555 		if (sc->sc_type >= WM_T_82571) {
   10556 			sc->sc_tbi_linkup = 1;
   10557 			return 0;
   10558 		}
   10559 	}
   10560 
   10561 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10562 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10563 	status = CSR_READ(sc, WMREG_STATUS);
   10564 
   10565 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10566 
   10567 	DPRINTF(WM_DEBUG_LINK,
   10568 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10569 		device_xname(sc->sc_dev), __func__,
   10570 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10571 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10572 
   10573 	/*
   10574 	 * SWDPIN   LU RXCW
   10575 	 *      0    0    0
   10576 	 *      0    0    1	(should not happen)
   10577 	 *      0    1    0	(should not happen)
   10578 	 *      0    1    1	(should not happen)
   10579 	 *      1    0    0	Disable autonego and force linkup
   10580 	 *      1    0    1	got /C/ but not linkup yet
   10581 	 *      1    1    0	(linkup)
   10582 	 *      1    1    1	If IFM_AUTO, back to autonego
   10583 	 *
   10584 	 */
   10585 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10586 	    && ((status & STATUS_LU) == 0)
   10587 	    && ((rxcw & RXCW_C) == 0)) {
   10588 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10589 			__func__));
   10590 		sc->sc_tbi_linkup = 0;
   10591 		/* Disable auto-negotiation in the TXCW register */
   10592 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10593 
   10594 		/*
   10595 		 * Force link-up and also force full-duplex.
   10596 		 *
  10597 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
  10598 		 * automatically, so we must refresh the cached sc->sc_ctrl.
   10599 		 */
   10600 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10601 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10602 	} else if (((status & STATUS_LU) != 0)
   10603 	    && ((rxcw & RXCW_C) != 0)
   10604 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10605 		sc->sc_tbi_linkup = 1;
   10606 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10607 			__func__));
   10608 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10609 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10610 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10611 	    && ((rxcw & RXCW_C) != 0)) {
   10612 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10613 	} else {
   10614 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10615 			status));
   10616 	}
   10617 
   10618 	return 0;
   10619 }
   10620 
   10621 /*
   10622  * wm_tbi_tick:
   10623  *
   10624  *	Check the link on TBI devices.
   10625  *	This function acts as mii_tick().
   10626  */
   10627 static void
   10628 wm_tbi_tick(struct wm_softc *sc)
   10629 {
   10630 	struct mii_data *mii = &sc->sc_mii;
   10631 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10632 	uint32_t status;
   10633 
   10634 	KASSERT(WM_CORE_LOCKED(sc));
   10635 
   10636 	status = CSR_READ(sc, WMREG_STATUS);
   10637 
   10638 	/* XXX is this needed? */
   10639 	(void)CSR_READ(sc, WMREG_RXCW);
   10640 	(void)CSR_READ(sc, WMREG_CTRL);
   10641 
   10642 	/* set link status */
   10643 	if ((status & STATUS_LU) == 0) {
   10644 		DPRINTF(WM_DEBUG_LINK,
   10645 		    ("%s: LINK: checklink -> down\n",
   10646 			device_xname(sc->sc_dev)));
   10647 		sc->sc_tbi_linkup = 0;
   10648 	} else if (sc->sc_tbi_linkup == 0) {
   10649 		DPRINTF(WM_DEBUG_LINK,
   10650 		    ("%s: LINK: checklink -> up %s\n",
   10651 			device_xname(sc->sc_dev),
   10652 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10653 		sc->sc_tbi_linkup = 1;
   10654 		sc->sc_tbi_serdes_ticks = 0;
   10655 	}
   10656 
   10657 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10658 		goto setled;
   10659 
   10660 	if ((status & STATUS_LU) == 0) {
   10661 		sc->sc_tbi_linkup = 0;
   10662 		/* If the timer expired, retry autonegotiation */
   10663 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10664 		    && (++sc->sc_tbi_serdes_ticks
   10665 			>= sc->sc_tbi_serdes_anegticks)) {
   10666 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10667 			sc->sc_tbi_serdes_ticks = 0;
   10668 			/*
   10669 			 * Reset the link, and let autonegotiation do
   10670 			 * its thing
   10671 			 */
   10672 			sc->sc_ctrl |= CTRL_LRST;
   10673 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10674 			CSR_WRITE_FLUSH(sc);
   10675 			delay(1000);
   10676 			sc->sc_ctrl &= ~CTRL_LRST;
   10677 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10678 			CSR_WRITE_FLUSH(sc);
   10679 			delay(1000);
   10680 			CSR_WRITE(sc, WMREG_TXCW,
   10681 			    sc->sc_txcw & ~TXCW_ANE);
   10682 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10683 		}
   10684 	}
   10685 
   10686 setled:
   10687 	wm_tbi_serdes_set_linkled(sc);
   10688 }
   10689 
   10690 /* SERDES related */
   10691 static void
   10692 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10693 {
   10694 	uint32_t reg;
   10695 
   10696 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10697 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10698 		return;
   10699 
   10700 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10701 	reg |= PCS_CFG_PCS_EN;
   10702 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10703 
   10704 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10705 	reg &= ~CTRL_EXT_SWDPIN(3);
   10706 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10707 	CSR_WRITE_FLUSH(sc);
   10708 }
   10709 
   10710 static int
   10711 wm_serdes_mediachange(struct ifnet *ifp)
   10712 {
   10713 	struct wm_softc *sc = ifp->if_softc;
   10714 	bool pcs_autoneg = true; /* XXX */
   10715 	uint32_t ctrl_ext, pcs_lctl, reg;
   10716 
   10717 	/* XXX Currently, this function is not called on 8257[12] */
   10718 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10719 	    || (sc->sc_type >= WM_T_82575))
   10720 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10721 
   10722 	wm_serdes_power_up_link_82575(sc);
   10723 
   10724 	sc->sc_ctrl |= CTRL_SLU;
   10725 
   10726 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10727 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10728 
   10729 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10730 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10731 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10732 	case CTRL_EXT_LINK_MODE_SGMII:
   10733 		pcs_autoneg = true;
   10734 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10735 		break;
   10736 	case CTRL_EXT_LINK_MODE_1000KX:
   10737 		pcs_autoneg = false;
   10738 		/* FALLTHROUGH */
   10739 	default:
   10740 		if ((sc->sc_type == WM_T_82575)
   10741 		    || (sc->sc_type == WM_T_82576)) {
   10742 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10743 				pcs_autoneg = false;
   10744 		}
   10745 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10746 		    | CTRL_FRCFDX;
   10747 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10748 	}
   10749 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10750 
   10751 	if (pcs_autoneg) {
   10752 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10753 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10754 
   10755 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10756 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10757 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10758 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10759 	} else
   10760 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10761 
   10762 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10763 
   10765 	return 0;
   10766 }
   10767 
   10768 static void
   10769 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10770 {
   10771 	struct wm_softc *sc = ifp->if_softc;
   10772 	struct mii_data *mii = &sc->sc_mii;
   10773 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10774 	uint32_t pcs_adv, pcs_lpab, reg;
   10775 
   10776 	ifmr->ifm_status = IFM_AVALID;
   10777 	ifmr->ifm_active = IFM_ETHER;
   10778 
   10779 	/* Check PCS */
   10780 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10781 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10782 		ifmr->ifm_active |= IFM_NONE;
   10783 		sc->sc_tbi_linkup = 0;
   10784 		goto setled;
   10785 	}
   10786 
   10787 	sc->sc_tbi_linkup = 1;
   10788 	ifmr->ifm_status |= IFM_ACTIVE;
   10789 	if (sc->sc_type == WM_T_I354) {
   10790 		uint32_t status;
   10791 
   10792 		status = CSR_READ(sc, WMREG_STATUS);
   10793 		if (((status & STATUS_2P5_SKU) != 0)
   10794 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10795 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10796 		} else
   10797 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10798 	} else {
   10799 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10800 		case PCS_LSTS_SPEED_10:
   10801 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10802 			break;
   10803 		case PCS_LSTS_SPEED_100:
   10804 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10805 			break;
   10806 		case PCS_LSTS_SPEED_1000:
   10807 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10808 			break;
   10809 		default:
   10810 			device_printf(sc->sc_dev, "Unknown speed\n");
   10811 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10812 			break;
   10813 		}
   10814 	}
   10815 	if ((reg & PCS_LSTS_FDX) != 0)
   10816 		ifmr->ifm_active |= IFM_FDX;
   10817 	else
   10818 		ifmr->ifm_active |= IFM_HDX;
   10819 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10820 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10821 		/* Check flow */
   10822 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10823 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10824 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10825 			goto setled;
   10826 		}
   10827 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10828 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10829 		DPRINTF(WM_DEBUG_LINK,
   10830 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10831 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10832 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10833 			mii->mii_media_active |= IFM_FLOW
   10834 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10835 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10836 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10837 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10838 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10839 			mii->mii_media_active |= IFM_FLOW
   10840 			    | IFM_ETH_TXPAUSE;
   10841 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10842 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10843 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10844 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10845 			mii->mii_media_active |= IFM_FLOW
   10846 			    | IFM_ETH_RXPAUSE;
   10847 		}
   10848 	}
   10849 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10850 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10851 setled:
   10852 	wm_tbi_serdes_set_linkled(sc);
   10853 }
   10854 
   10855 /*
   10856  * wm_serdes_tick:
   10857  *
   10858  *	Check the link on serdes devices.
   10859  */
   10860 static void
   10861 wm_serdes_tick(struct wm_softc *sc)
   10862 {
   10863 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10864 	struct mii_data *mii = &sc->sc_mii;
   10865 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10866 	uint32_t reg;
   10867 
   10868 	KASSERT(WM_CORE_LOCKED(sc));
   10869 
   10870 	mii->mii_media_status = IFM_AVALID;
   10871 	mii->mii_media_active = IFM_ETHER;
   10872 
   10873 	/* Check PCS */
   10874 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10875 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10876 		mii->mii_media_status |= IFM_ACTIVE;
   10877 		sc->sc_tbi_linkup = 1;
   10878 		sc->sc_tbi_serdes_ticks = 0;
   10879 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10880 		if ((reg & PCS_LSTS_FDX) != 0)
   10881 			mii->mii_media_active |= IFM_FDX;
   10882 		else
   10883 			mii->mii_media_active |= IFM_HDX;
   10884 	} else {
   10885 		mii->mii_media_status |= IFM_NONE;
   10886 		sc->sc_tbi_linkup = 0;
   10887 		/* If the timer expired, retry autonegotiation */
   10888 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10889 		    && (++sc->sc_tbi_serdes_ticks
   10890 			>= sc->sc_tbi_serdes_anegticks)) {
   10891 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10892 			sc->sc_tbi_serdes_ticks = 0;
   10893 			/* XXX */
   10894 			wm_serdes_mediachange(ifp);
   10895 		}
   10896 	}
   10897 
   10898 	wm_tbi_serdes_set_linkled(sc);
   10899 }
   10900 
   10901 /* SFP related */
   10902 
   10903 static int
   10904 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10905 {
   10906 	uint32_t i2ccmd;
   10907 	int i;
   10908 
   10909 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10910 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10911 
   10912 	/* Poll the ready bit */
   10913 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10914 		delay(50);
   10915 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10916 		if (i2ccmd & I2CCMD_READY)
   10917 			break;
   10918 	}
   10919 	if ((i2ccmd & I2CCMD_READY) == 0)
   10920 		return -1;
   10921 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10922 		return -1;
   10923 
   10924 	*data = i2ccmd & 0x00ff;
   10925 
   10926 	return 0;
   10927 }
   10928 
   10929 static uint32_t
   10930 wm_sfp_get_media_type(struct wm_softc *sc)
   10931 {
   10932 	uint32_t ctrl_ext;
   10933 	uint8_t val = 0;
   10934 	int timeout = 3;
   10935 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10936 	int rv = -1;
   10937 
   10938 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10939 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10940 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10941 	CSR_WRITE_FLUSH(sc);
   10942 
   10943 	/* Read SFP module data */
   10944 	while (timeout) {
   10945 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10946 		if (rv == 0)
   10947 			break;
   10948 		delay(100*1000); /* XXX too big */
   10949 		timeout--;
   10950 	}
   10951 	if (rv != 0)
   10952 		goto out;
   10953 	switch (val) {
   10954 	case SFF_SFP_ID_SFF:
   10955 		aprint_normal_dev(sc->sc_dev,
   10956 		    "Module/Connector soldered to board\n");
   10957 		break;
   10958 	case SFF_SFP_ID_SFP:
   10959 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10960 		break;
   10961 	case SFF_SFP_ID_UNKNOWN:
   10962 		goto out;
   10963 	default:
   10964 		break;
   10965 	}
   10966 
   10967 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10968 	if (rv != 0) {
   10969 		goto out;
   10970 	}
   10971 
   10972 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10973 		mediatype = WM_MEDIATYPE_SERDES;
  10974 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   10975 		sc->sc_flags |= WM_F_SGMII;
   10976 		mediatype = WM_MEDIATYPE_COPPER;
  10977 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   10978 		sc->sc_flags |= WM_F_SGMII;
   10979 		mediatype = WM_MEDIATYPE_SERDES;
   10980 	}
   10981 
   10982 out:
   10983 	/* Restore I2C interface setting */
   10984 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10985 
   10986 	return mediatype;
   10987 }
   10988 
   10989 /*
   10990  * NVM related.
   10991  * Microwire, SPI (w/wo EERD) and Flash.
   10992  */
   10993 
   10994 /* Both spi and uwire */
   10995 
   10996 /*
   10997  * wm_eeprom_sendbits:
   10998  *
   10999  *	Send a series of bits to the EEPROM.
   11000  */
   11001 static void
   11002 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11003 {
   11004 	uint32_t reg;
   11005 	int x;
   11006 
   11007 	reg = CSR_READ(sc, WMREG_EECD);
   11008 
   11009 	for (x = nbits; x > 0; x--) {
   11010 		if (bits & (1U << (x - 1)))
   11011 			reg |= EECD_DI;
   11012 		else
   11013 			reg &= ~EECD_DI;
   11014 		CSR_WRITE(sc, WMREG_EECD, reg);
   11015 		CSR_WRITE_FLUSH(sc);
   11016 		delay(2);
   11017 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11018 		CSR_WRITE_FLUSH(sc);
   11019 		delay(2);
   11020 		CSR_WRITE(sc, WMREG_EECD, reg);
   11021 		CSR_WRITE_FLUSH(sc);
   11022 		delay(2);
   11023 	}
   11024 }
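
        /*
         * Each bit is clocked by toggling EECD_SK around a stable EECD_DI,
         * MSB first.  A Microwire READ of word 5 from a part with 6
         * address bits would look like this (a sketch, assuming
         * UWIRE_OPC_READ is the usual 110b start bit + opcode):
         *
         *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	-> 1,1,0
         *	wm_eeprom_sendbits(sc, 5, 6);			-> 0,0,0,1,0,1
         */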
   11025 
   11026 /*
   11027  * wm_eeprom_recvbits:
   11028  *
   11029  *	Receive a series of bits from the EEPROM.
   11030  */
   11031 static void
   11032 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11033 {
   11034 	uint32_t reg, val;
   11035 	int x;
   11036 
   11037 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11038 
   11039 	val = 0;
   11040 	for (x = nbits; x > 0; x--) {
   11041 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11042 		CSR_WRITE_FLUSH(sc);
   11043 		delay(2);
   11044 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11045 			val |= (1U << (x - 1));
   11046 		CSR_WRITE(sc, WMREG_EECD, reg);
   11047 		CSR_WRITE_FLUSH(sc);
   11048 		delay(2);
   11049 	}
   11050 	*valp = val;
   11051 }
   11052 
   11053 /* Microwire */
   11054 
   11055 /*
   11056  * wm_nvm_read_uwire:
   11057  *
   11058  *	Read a word from the EEPROM using the MicroWire protocol.
   11059  */
   11060 static int
   11061 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11062 {
   11063 	uint32_t reg, val;
   11064 	int i;
   11065 
   11066 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11067 		device_xname(sc->sc_dev), __func__));
   11068 
   11069 	for (i = 0; i < wordcnt; i++) {
   11070 		/* Clear SK and DI. */
   11071 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11072 		CSR_WRITE(sc, WMREG_EECD, reg);
   11073 
   11074 		/*
   11075 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11076 		 * and Xen.
   11077 		 *
   11078 		 * We use this workaround only for 82540 because qemu's
   11079 		 * e1000 act as 82540.
   11080 		 */
   11081 		if (sc->sc_type == WM_T_82540) {
   11082 			reg |= EECD_SK;
   11083 			CSR_WRITE(sc, WMREG_EECD, reg);
   11084 			reg &= ~EECD_SK;
   11085 			CSR_WRITE(sc, WMREG_EECD, reg);
   11086 			CSR_WRITE_FLUSH(sc);
   11087 			delay(2);
   11088 		}
   11089 		/* XXX: end of workaround */
   11090 
   11091 		/* Set CHIP SELECT. */
   11092 		reg |= EECD_CS;
   11093 		CSR_WRITE(sc, WMREG_EECD, reg);
   11094 		CSR_WRITE_FLUSH(sc);
   11095 		delay(2);
   11096 
   11097 		/* Shift in the READ command. */
   11098 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11099 
   11100 		/* Shift in address. */
   11101 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11102 
   11103 		/* Shift out the data. */
   11104 		wm_eeprom_recvbits(sc, &val, 16);
   11105 		data[i] = val & 0xffff;
   11106 
   11107 		/* Clear CHIP SELECT. */
   11108 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11109 		CSR_WRITE(sc, WMREG_EECD, reg);
   11110 		CSR_WRITE_FLUSH(sc);
   11111 		delay(2);
   11112 	}
   11113 
   11114 	return 0;
   11115 }
   11116 
   11117 /* SPI */
   11118 
   11119 /*
   11120  * Set SPI and FLASH related information from the EECD register.
   11121  * For 82541 and 82547, the word size is taken from EEPROM.
   11122  */
   11123 static int
   11124 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11125 {
   11126 	int size;
   11127 	uint32_t reg;
   11128 	uint16_t data;
   11129 
   11130 	reg = CSR_READ(sc, WMREG_EECD);
   11131 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11132 
   11133 	/* Read the size of NVM from EECD by default */
   11134 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11135 	switch (sc->sc_type) {
   11136 	case WM_T_82541:
   11137 	case WM_T_82541_2:
   11138 	case WM_T_82547:
   11139 	case WM_T_82547_2:
   11140 		/* Set dummy value to access EEPROM */
   11141 		sc->sc_nvm_wordsize = 64;
   11142 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11143 		reg = data;
   11144 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11145 		if (size == 0)
  11146 			size = 6; /* 1 << 6 = 64 words */
   11147 		else
   11148 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11149 		break;
   11150 	case WM_T_80003:
   11151 	case WM_T_82571:
   11152 	case WM_T_82572:
   11153 	case WM_T_82573: /* SPI case */
   11154 	case WM_T_82574: /* SPI case */
   11155 	case WM_T_82583: /* SPI case */
   11156 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11157 		if (size > 14)
   11158 			size = 14;
   11159 		break;
   11160 	case WM_T_82575:
   11161 	case WM_T_82576:
   11162 	case WM_T_82580:
   11163 	case WM_T_I350:
   11164 	case WM_T_I354:
   11165 	case WM_T_I210:
   11166 	case WM_T_I211:
   11167 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11168 		if (size > 15)
   11169 			size = 15;
   11170 		break;
   11171 	default:
   11172 		aprint_error_dev(sc->sc_dev,
   11173 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11174 		return -1;
   11176 	}
   11177 
   11178 	sc->sc_nvm_wordsize = 1 << size;
   11179 
   11180 	return 0;
   11181 }
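
        /*
         * A worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in
         * the e1000 sources): an EECD size field of 2 gives
         * size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 16-bit
         * words, i.e. a 512-byte part.
         */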
   11182 
   11183 /*
   11184  * wm_nvm_ready_spi:
   11185  *
   11186  *	Wait for a SPI EEPROM to be ready for commands.
   11187  */
   11188 static int
   11189 wm_nvm_ready_spi(struct wm_softc *sc)
   11190 {
   11191 	uint32_t val;
   11192 	int usec;
   11193 
   11194 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11195 		device_xname(sc->sc_dev), __func__));
   11196 
   11197 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11198 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11199 		wm_eeprom_recvbits(sc, &val, 8);
   11200 		if ((val & SPI_SR_RDY) == 0)
   11201 			break;
   11202 	}
   11203 	if (usec >= SPI_MAX_RETRIES) {
   11204 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   11205 		return 1;
   11206 	}
   11207 	return 0;
   11208 }
   11209 
   11210 /*
   11211  * wm_nvm_read_spi:
   11212  *
  11213  *	Read a word from the EEPROM using the SPI protocol.
   11214  */
   11215 static int
   11216 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11217 {
   11218 	uint32_t reg, val;
   11219 	int i;
   11220 	uint8_t opc;
   11221 
   11222 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11223 		device_xname(sc->sc_dev), __func__));
   11224 
   11225 	/* Clear SK and CS. */
   11226 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11227 	CSR_WRITE(sc, WMREG_EECD, reg);
   11228 	CSR_WRITE_FLUSH(sc);
   11229 	delay(2);
   11230 
   11231 	if (wm_nvm_ready_spi(sc))
   11232 		return 1;
   11233 
   11234 	/* Toggle CS to flush commands. */
   11235 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11236 	CSR_WRITE_FLUSH(sc);
   11237 	delay(2);
   11238 	CSR_WRITE(sc, WMREG_EECD, reg);
   11239 	CSR_WRITE_FLUSH(sc);
   11240 	delay(2);
   11241 
   11242 	opc = SPI_OPC_READ;
   11243 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11244 		opc |= SPI_OPC_A8;
   11245 
   11246 	wm_eeprom_sendbits(sc, opc, 8);
   11247 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11248 
   11249 	for (i = 0; i < wordcnt; i++) {
   11250 		wm_eeprom_recvbits(sc, &val, 16);
   11251 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11252 	}
   11253 
   11254 	/* Raise CS and clear SK. */
   11255 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11256 	CSR_WRITE(sc, WMREG_EECD, reg);
   11257 	CSR_WRITE_FLUSH(sc);
   11258 	delay(2);
   11259 
   11260 	return 0;
   11261 }
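
        /*
         * Two details of the SPI path above: the address is sent as
         * "word << 1" because SPI parts are byte addressed while the
         * caller deals in 16-bit words, and the bytes of each word arrive
         * MSB first, so they are swapped into host order before being
         * stored.
         */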
   11262 
   11263 /* Using with EERD */
   11264 
   11265 static int
   11266 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11267 {
   11268 	uint32_t attempts = 100000;
   11269 	uint32_t i, reg = 0;
   11270 	int32_t done = -1;
   11271 
   11272 	for (i = 0; i < attempts; i++) {
   11273 		reg = CSR_READ(sc, rw);
   11274 
   11275 		if (reg & EERD_DONE) {
   11276 			done = 0;
   11277 			break;
   11278 		}
   11279 		delay(5);
   11280 	}
   11281 
   11282 	return done;
   11283 }
   11284 
   11285 static int
   11286 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11287     uint16_t *data)
   11288 {
   11289 	int i, eerd = 0;
   11290 	int error = 0;
   11291 
   11292 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11293 		device_xname(sc->sc_dev), __func__));
   11294 
   11295 	for (i = 0; i < wordcnt; i++) {
   11296 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11297 
   11298 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11299 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11300 		if (error != 0)
   11301 			break;
   11302 
   11303 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11304 	}
   11305 
   11306 	return error;
   11307 }
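
        /*
         * With EERD the MAC runs the EEPROM protocol itself: software
         * writes the word address (shifted by EERD_ADDR_SHIFT) together
         * with EERD_START, polls for EERD_DONE and then takes the result
         * from the data field of the register (EERD_DATA_SHIFT).
         */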
   11308 
   11309 /* Flash */
   11310 
   11311 static int
   11312 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11313 {
   11314 	uint32_t eecd;
   11315 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11316 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11317 	uint8_t sig_byte = 0;
   11318 
   11319 	switch (sc->sc_type) {
   11320 	case WM_T_PCH_SPT:
   11321 		/*
   11322 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11323 		 * sector valid bits from the NVM.
   11324 		 */
   11325 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11326 		if ((*bank == 0) || (*bank == 1)) {
   11327 			aprint_error_dev(sc->sc_dev,
   11328 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11329 				*bank);
   11330 			return -1;
   11331 		} else {
   11332 			*bank = *bank - 2;
   11333 			return 0;
   11334 		}
   11335 	case WM_T_ICH8:
   11336 	case WM_T_ICH9:
   11337 		eecd = CSR_READ(sc, WMREG_EECD);
   11338 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11339 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11340 			return 0;
   11341 		}
   11342 		/* FALLTHROUGH */
   11343 	default:
   11344 		/* Default to 0 */
   11345 		*bank = 0;
   11346 
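          		/*
          		 * Each flash bank carries a signature byte; the bank
          		 * whose byte matches ICH_NVM_SIG_VALUE holds the valid
          		 * image.
          		 */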
   11347 		/* Check bank 0 */
   11348 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11349 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11350 			*bank = 0;
   11351 			return 0;
   11352 		}
   11353 
   11354 		/* Check bank 1 */
   11355 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11356 		    &sig_byte);
   11357 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11358 			*bank = 1;
   11359 			return 0;
   11360 		}
   11361 	}
   11362 
   11363 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11364 		device_xname(sc->sc_dev)));
   11365 	return -1;
   11366 }
   11367 
   11368 /******************************************************************************
   11369  * This function does initial flash setup so that a new read/write/erase cycle
   11370  * can be started.
   11371  *
   11372  * sc - The pointer to the hw structure
   11373  ****************************************************************************/
   11374 static int32_t
   11375 wm_ich8_cycle_init(struct wm_softc *sc)
   11376 {
   11377 	uint16_t hsfsts;
   11378 	int32_t error = 1;
   11379 	int32_t i     = 0;
   11380 
   11381 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11382 
    11383 	/* Check the Flash Descriptor Valid bit in Hw status */
   11384 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11385 		return error;
   11386 	}
   11387 
    11388 	/* Clear FCERR and DAEL in Hw status by writing 1s */
   11390 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11391 
   11392 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11393 
   11394 	/*
    11395 	 * Ideally the hardware would provide a cycle-in-progress bit
    11396 	 * that we could check before starting a new cycle, or FDONE
    11397 	 * would be set to 1 after a hardware reset so that it could
    11398 	 * indicate whether a cycle is in progress or has completed.
    11399 	 * We should also have a software semaphore mechanism guarding
    11400 	 * FDONE (or the in-progress bit) so that accesses by two
    11401 	 * threads are serialized and two threads cannot start a cycle
    11402 	 * at the same time.
   11403 	 */
   11404 
   11405 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11406 		/*
   11407 		 * There is no cycle running at present, so we can start a
   11408 		 * cycle
   11409 		 */
   11410 
   11411 		/* Begin by setting Flash Cycle Done. */
   11412 		hsfsts |= HSFSTS_DONE;
   11413 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11414 		error = 0;
   11415 	} else {
   11416 		/*
    11417 		 * Otherwise poll for some time so the current cycle has a
    11418 		 * chance to end before giving up.
   11419 		 */
   11420 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11421 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11422 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11423 				error = 0;
   11424 				break;
   11425 			}
   11426 			delay(1);
   11427 		}
   11428 		if (error == 0) {
   11429 			/*
    11430 			 * The previous cycle finished in time; now set the
    11431 			 * Flash Cycle Done bit.
   11432 			 */
   11433 			hsfsts |= HSFSTS_DONE;
   11434 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11435 		}
   11436 	}
   11437 	return error;
   11438 }
   11439 
   11440 /******************************************************************************
   11441  * This function starts a flash cycle and waits for its completion
   11442  *
   11443  * sc - The pointer to the hw structure
   11444  ****************************************************************************/
   11445 static int32_t
   11446 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11447 {
   11448 	uint16_t hsflctl;
   11449 	uint16_t hsfsts;
   11450 	int32_t error = 1;
   11451 	uint32_t i = 0;
   11452 
   11453 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11454 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11455 	hsflctl |= HSFCTL_GO;
   11456 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11457 
   11458 	/* Wait till FDONE bit is set to 1 */
   11459 	do {
   11460 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11461 		if (hsfsts & HSFSTS_DONE)
   11462 			break;
   11463 		delay(1);
   11464 		i++;
   11465 	} while (i < timeout);
    11466 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11467 		error = 0;
   11468 
   11469 	return error;
   11470 }
   11471 
   11472 /******************************************************************************
   11473  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11474  *
   11475  * sc - The pointer to the hw structure
   11476  * index - The index of the byte or word to read.
   11477  * size - Size of data to read, 1=byte 2=word, 4=dword
   11478  * data - Pointer to the word to store the value read.
   11479  *****************************************************************************/
   11480 static int32_t
   11481 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11482     uint32_t size, uint32_t *data)
   11483 {
   11484 	uint16_t hsfsts;
   11485 	uint16_t hsflctl;
   11486 	uint32_t flash_linear_address;
   11487 	uint32_t flash_data = 0;
   11488 	int32_t error = 1;
   11489 	int32_t count = 0;
   11490 
    11491 	if (size < 1 || size > 4 || data == NULL ||
   11492 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11493 		return error;
   11494 
   11495 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11496 	    sc->sc_ich8_flash_base;
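          	/*
          	 * The address programmed into FADDR is linear within the
          	 * flash part; the NVM image starts sc_ich8_flash_base bytes
          	 * into the flash, hence the base added above.
          	 */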
   11497 
   11498 	do {
   11499 		delay(1);
   11500 		/* Steps */
    11501 		/* Prepare the flash controller for a new cycle. */
   11502 		if (error)
   11503 			break;
   11504 
   11505 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11506 		/* The byte count field holds size - 1 (0 = 1 byte ... 3 = 4 bytes). */
   11507 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11508 		    & HSFCTL_BCOUNT_MASK;
   11509 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11510 		if (sc->sc_type == WM_T_PCH_SPT) {
   11511 			/*
    11512 			 * In SPT, this register is in LAN memory space, not
    11513 			 * flash.  Therefore, only 32 bit access is supported.
   11514 			 */
   11515 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11516 			    (uint32_t)hsflctl);
   11517 		} else
   11518 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11519 
   11520 		/*
   11521 		 * Write the last 24 bits of index into Flash Linear address
   11522 		 * field in Flash Address
   11523 		 */
    11524 		/* TODO: check the index against the size of the flash */
   11525 
   11526 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11527 
   11528 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11529 
   11530 		/*
    11531 		 * If FCERR is set, clear it and retry the whole sequence
    11532 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read
    11533 		 * the Flash Data0 register, which returns data least
    11534 		 * significant byte first.
   11535 		 */
   11536 		if (error == 0) {
   11537 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11538 			if (size == 1)
   11539 				*data = (uint8_t)(flash_data & 0x000000FF);
   11540 			else if (size == 2)
   11541 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11542 			else if (size == 4)
   11543 				*data = (uint32_t)flash_data;
   11544 			break;
   11545 		} else {
   11546 			/*
   11547 			 * If we've gotten here, then things are probably
   11548 			 * completely hosed, but if the error condition is
   11549 			 * detected, it won't hurt to give it another try...
   11550 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11551 			 */
   11552 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11553 			if (hsfsts & HSFSTS_ERR) {
   11554 				/* Repeat for some time before giving up. */
   11555 				continue;
   11556 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11557 				break;
   11558 		}
   11559 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11560 
   11561 	return error;
   11562 }
   11563 
   11564 /******************************************************************************
   11565  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11566  *
   11567  * sc - pointer to wm_hw structure
   11568  * index - The index of the byte to read.
   11569  * data - Pointer to a byte to store the value read.
   11570  *****************************************************************************/
   11571 static int32_t
   11572 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11573 {
   11574 	int32_t status;
   11575 	uint32_t word = 0;
   11576 
   11577 	status = wm_read_ich8_data(sc, index, 1, &word);
   11578 	if (status == 0)
   11579 		*data = (uint8_t)word;
   11580 	else
   11581 		*data = 0;
   11582 
   11583 	return status;
   11584 }
   11585 
   11586 /******************************************************************************
   11587  * Reads a word from the NVM using the ICH8 flash access registers.
   11588  *
   11589  * sc - pointer to wm_hw structure
   11590  * index - The starting byte index of the word to read.
   11591  * data - Pointer to a word to store the value read.
   11592  *****************************************************************************/
   11593 static int32_t
   11594 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11595 {
   11596 	int32_t status;
   11597 	uint32_t word = 0;
   11598 
   11599 	status = wm_read_ich8_data(sc, index, 2, &word);
   11600 	if (status == 0)
   11601 		*data = (uint16_t)word;
   11602 	else
   11603 		*data = 0;
   11604 
   11605 	return status;
   11606 }
   11607 
   11608 /******************************************************************************
   11609  * Reads a dword from the NVM using the ICH8 flash access registers.
   11610  *
   11611  * sc - pointer to wm_hw structure
   11612  * index - The starting byte index of the word to read.
   11613  * data - Pointer to a word to store the value read.
   11614  *****************************************************************************/
   11615 static int32_t
   11616 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11617 {
   11618 	int32_t status;
   11619 
   11620 	status = wm_read_ich8_data(sc, index, 4, data);
   11621 	return status;
   11622 }
   11623 
   11624 /******************************************************************************
   11625  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11626  * register.
   11627  *
   11628  * sc - Struct containing variables accessed by shared code
   11629  * offset - offset of word in the EEPROM to read
   11630  * data - word read from the EEPROM
   11631  * words - number of words to read
   11632  *****************************************************************************/
   11633 static int
   11634 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11635 {
   11636 	int32_t  error = 0;
   11637 	uint32_t flash_bank = 0;
   11638 	uint32_t act_offset = 0;
   11639 	uint32_t bank_offset = 0;
   11640 	uint16_t word = 0;
   11641 	uint16_t i = 0;
   11642 
   11643 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11644 		device_xname(sc->sc_dev), __func__));
   11645 
   11646 	/*
   11647 	 * We need to know which is the valid flash bank.  In the event
   11648 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11649 	 * managing flash_bank.  So it cannot be trusted and needs
   11650 	 * to be updated with each read.
   11651 	 */
   11652 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11653 	if (error) {
   11654 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11655 			device_xname(sc->sc_dev)));
   11656 		flash_bank = 0;
   11657 	}
   11658 
   11659 	/*
    11660 	 * Adjust the offset if we're on bank 1; the bank size is in
    11661 	 * words, so multiply by two for a byte offset.
   11662 	 */
   11663 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11664 
   11665 	error = wm_get_swfwhw_semaphore(sc);
   11666 	if (error) {
   11667 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11668 		    __func__);
   11669 		return error;
   11670 	}
   11671 
   11672 	for (i = 0; i < words; i++) {
   11673 		/* The NVM part needs a byte offset, hence * 2 */
   11674 		act_offset = bank_offset + ((offset + i) * 2);
   11675 		error = wm_read_ich8_word(sc, act_offset, &word);
   11676 		if (error) {
   11677 			aprint_error_dev(sc->sc_dev,
   11678 			    "%s: failed to read NVM\n", __func__);
   11679 			break;
   11680 		}
   11681 		data[i] = word;
   11682 	}
   11683 
   11684 	wm_put_swfwhw_semaphore(sc);
   11685 	return error;
   11686 }
   11687 
   11688 /******************************************************************************
   11689  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11690  * register.
   11691  *
   11692  * sc - Struct containing variables accessed by shared code
   11693  * offset - offset of word in the EEPROM to read
   11694  * data - word read from the EEPROM
   11695  * words - number of words to read
   11696  *****************************************************************************/
   11697 static int
   11698 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11699 {
   11700 	int32_t  error = 0;
   11701 	uint32_t flash_bank = 0;
   11702 	uint32_t act_offset = 0;
   11703 	uint32_t bank_offset = 0;
   11704 	uint32_t dword = 0;
   11705 	uint16_t i = 0;
   11706 
   11707 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11708 		device_xname(sc->sc_dev), __func__));
   11709 
   11710 	/*
   11711 	 * We need to know which is the valid flash bank.  In the event
   11712 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11713 	 * managing flash_bank.  So it cannot be trusted and needs
   11714 	 * to be updated with each read.
   11715 	 */
   11716 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11717 	if (error) {
   11718 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11719 			device_xname(sc->sc_dev)));
   11720 		flash_bank = 0;
   11721 	}
   11722 
   11723 	/*
    11724 	 * Adjust the offset if we're on bank 1; the bank size is in
    11725 	 * words, so multiply by two for a byte offset.
   11726 	 */
   11727 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11728 
   11729 	error = wm_get_swfwhw_semaphore(sc);
   11730 	if (error) {
   11731 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11732 		    __func__);
   11733 		return error;
   11734 	}
   11735 
   11736 	for (i = 0; i < words; i++) {
   11737 		/* The NVM part needs a byte offset, hence * 2 */
   11738 		act_offset = bank_offset + ((offset + i) * 2);
   11739 		/* but we must read dword aligned, so mask ... */
   11740 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11741 		if (error) {
   11742 			aprint_error_dev(sc->sc_dev,
   11743 			    "%s: failed to read NVM\n", __func__);
   11744 			break;
   11745 		}
   11746 		/* ... and pick out low or high word */
   11747 		if ((act_offset & 0x2) == 0)
   11748 			data[i] = (uint16_t)(dword & 0xFFFF);
   11749 		else
   11750 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11751 	}
   11752 
   11753 	wm_put_swfwhw_semaphore(sc);
   11754 	return error;
   11755 }
   11756 
   11757 /* iNVM */
   11758 
   11759 static int
   11760 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11761 {
    11762 	int32_t  rv = -1;	/* Fail unless a matching record is found */
   11763 	uint32_t invm_dword;
   11764 	uint16_t i;
   11765 	uint8_t record_type, word_address;
   11766 
   11767 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11768 		device_xname(sc->sc_dev), __func__));
   11769 
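          	/*
          	 * Walk the iNVM (OTP) dwords.  CSR and RSA key autoload
          	 * records span multiple dwords, so skip over their payloads;
          	 * each word autoload record carries one address/data pair.
          	 */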
   11770 	for (i = 0; i < INVM_SIZE; i++) {
   11771 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11772 		/* Get record type */
   11773 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11774 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11775 			break;
   11776 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11777 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11778 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11779 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11780 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11781 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11782 			if (word_address == address) {
   11783 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11784 				rv = 0;
   11785 				break;
   11786 			}
   11787 		}
   11788 	}
   11789 
   11790 	return rv;
   11791 }
   11792 
   11793 static int
   11794 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11795 {
   11796 	int rv = 0;
   11797 	int i;
   11798 
   11799 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11800 		device_xname(sc->sc_dev), __func__));
   11801 
   11802 	for (i = 0; i < words; i++) {
   11803 		switch (offset + i) {
   11804 		case NVM_OFF_MACADDR:
   11805 		case NVM_OFF_MACADDR1:
   11806 		case NVM_OFF_MACADDR2:
   11807 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11808 			if (rv != 0) {
   11809 				data[i] = 0xffff;
   11810 				rv = -1;
   11811 			}
   11812 			break;
   11813 		case NVM_OFF_CFG2:
   11814 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11815 			if (rv != 0) {
   11816 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11817 				rv = 0;
   11818 			}
   11819 			break;
   11820 		case NVM_OFF_CFG4:
   11821 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11822 			if (rv != 0) {
   11823 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11824 				rv = 0;
   11825 			}
   11826 			break;
   11827 		case NVM_OFF_LED_1_CFG:
   11828 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11829 			if (rv != 0) {
   11830 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11831 				rv = 0;
   11832 			}
   11833 			break;
   11834 		case NVM_OFF_LED_0_2_CFG:
   11835 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11836 			if (rv != 0) {
   11837 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11838 				rv = 0;
   11839 			}
   11840 			break;
   11841 		case NVM_OFF_ID_LED_SETTINGS:
   11842 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11843 			if (rv != 0) {
   11844 				*data = ID_LED_RESERVED_FFFF;
   11845 				rv = 0;
   11846 			}
   11847 			break;
   11848 		default:
   11849 			DPRINTF(WM_DEBUG_NVM,
   11850 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11851 			*data = NVM_RESERVED_WORD;
   11852 			break;
   11853 		}
   11854 	}
   11855 
   11856 	return rv;
   11857 }
   11858 
    11859 /* Locking, NVM type detection, checksum validation, version check and read */
   11860 
   11861 /*
   11862  * wm_nvm_acquire:
   11863  *
   11864  *	Perform the EEPROM handshake required on some chips.
   11865  */
   11866 static int
   11867 wm_nvm_acquire(struct wm_softc *sc)
   11868 {
   11869 	uint32_t reg;
   11870 	int x;
   11871 	int ret = 0;
   11872 
   11873 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11874 		device_xname(sc->sc_dev), __func__));
   11875 
   11876 	if (sc->sc_type >= WM_T_ICH8) {
   11877 		ret = wm_get_nvm_ich8lan(sc);
   11878 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11879 		ret = wm_get_swfwhw_semaphore(sc);
   11880 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11881 		/* This will also do wm_get_swsm_semaphore() if needed */
   11882 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11883 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11884 		ret = wm_get_swsm_semaphore(sc);
   11885 	}
   11886 
   11887 	if (ret) {
   11888 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11889 			__func__);
   11890 		return 1;
   11891 	}
   11892 
   11893 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11894 		reg = CSR_READ(sc, WMREG_EECD);
   11895 
   11896 		/* Request EEPROM access. */
   11897 		reg |= EECD_EE_REQ;
   11898 		CSR_WRITE(sc, WMREG_EECD, reg);
   11899 
    11900 		/* ... and wait for it to be granted. */
   11901 		for (x = 0; x < 1000; x++) {
   11902 			reg = CSR_READ(sc, WMREG_EECD);
   11903 			if (reg & EECD_EE_GNT)
   11904 				break;
   11905 			delay(5);
   11906 		}
   11907 		if ((reg & EECD_EE_GNT) == 0) {
   11908 			aprint_error_dev(sc->sc_dev,
   11909 			    "could not acquire EEPROM GNT\n");
   11910 			reg &= ~EECD_EE_REQ;
   11911 			CSR_WRITE(sc, WMREG_EECD, reg);
   11912 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11913 				wm_put_swfwhw_semaphore(sc);
   11914 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11915 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11916 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11917 				wm_put_swsm_semaphore(sc);
   11918 			return 1;
   11919 		}
   11920 	}
   11921 
   11922 	return 0;
   11923 }
   11924 
   11925 /*
   11926  * wm_nvm_release:
   11927  *
   11928  *	Release the EEPROM mutex.
   11929  */
   11930 static void
   11931 wm_nvm_release(struct wm_softc *sc)
   11932 {
   11933 	uint32_t reg;
   11934 
   11935 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11936 		device_xname(sc->sc_dev), __func__));
   11937 
   11938 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11939 		reg = CSR_READ(sc, WMREG_EECD);
   11940 		reg &= ~EECD_EE_REQ;
   11941 		CSR_WRITE(sc, WMREG_EECD, reg);
   11942 	}
   11943 
   11944 	if (sc->sc_type >= WM_T_ICH8) {
   11945 		wm_put_nvm_ich8lan(sc);
   11946 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11947 		wm_put_swfwhw_semaphore(sc);
   11948 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11949 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11950 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11951 		wm_put_swsm_semaphore(sc);
   11952 }
   11953 
   11954 static int
   11955 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11956 {
   11957 	uint32_t eecd = 0;
   11958 
   11959 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11960 	    || sc->sc_type == WM_T_82583) {
   11961 		eecd = CSR_READ(sc, WMREG_EECD);
   11962 
   11963 		/* Isolate bits 15 & 16 */
   11964 		eecd = ((eecd >> 15) & 0x03);
   11965 
   11966 		/* If both bits are set, device is Flash type */
   11967 		if (eecd == 0x03)
   11968 			return 0;
   11969 	}
   11970 	return 1;
   11971 }
   11972 
   11973 static int
   11974 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11975 {
   11976 	uint32_t eec;
   11977 
   11978 	eec = CSR_READ(sc, WMREG_EEC);
   11979 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11980 		return 1;
   11981 
   11982 	return 0;
   11983 }
   11984 
   11985 /*
   11986  * wm_nvm_validate_checksum
   11987  *
   11988  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11989  */
   11990 static int
   11991 wm_nvm_validate_checksum(struct wm_softc *sc)
   11992 {
   11993 	uint16_t checksum;
   11994 	uint16_t eeprom_data;
   11995 #ifdef WM_DEBUG
   11996 	uint16_t csum_wordaddr, valid_checksum;
   11997 #endif
   11998 	int i;
   11999 
   12000 	checksum = 0;
   12001 
   12002 	/* Don't check for I211 */
   12003 	if (sc->sc_type == WM_T_I211)
   12004 		return 0;
   12005 
   12006 #ifdef WM_DEBUG
   12007 	if (sc->sc_type == WM_T_PCH_LPT) {
   12008 		csum_wordaddr = NVM_OFF_COMPAT;
   12009 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12010 	} else {
   12011 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12012 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12013 	}
   12014 
   12015 	/* Dump EEPROM image for debug */
   12016 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12017 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12018 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12019 		/* XXX PCH_SPT? */
   12020 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12021 		if ((eeprom_data & valid_checksum) == 0) {
   12022 			DPRINTF(WM_DEBUG_NVM,
    12023 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12024 				device_xname(sc->sc_dev), eeprom_data,
   12025 				    valid_checksum));
   12026 		}
   12027 	}
   12028 
   12029 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12030 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12031 		for (i = 0; i < NVM_SIZE; i++) {
   12032 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12033 				printf("XXXX ");
   12034 			else
   12035 				printf("%04hx ", eeprom_data);
   12036 			if (i % 8 == 7)
   12037 				printf("\n");
   12038 		}
   12039 	}
   12040 
   12041 #endif /* WM_DEBUG */
   12042 
   12043 	for (i = 0; i < NVM_SIZE; i++) {
   12044 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12045 			return 1;
   12046 		checksum += eeprom_data;
   12047 	}
   12048 
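          	/*
          	 * The sum of the first NVM_SIZE words should equal
          	 * NVM_CHECKSUM.  A mismatch is only reported under WM_DEBUG;
          	 * it is not treated as fatal here.
          	 */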
   12049 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12050 #ifdef WM_DEBUG
   12051 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12052 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12053 #endif
   12054 	}
   12055 
   12056 	return 0;
   12057 }
   12058 
   12059 static void
   12060 wm_nvm_version_invm(struct wm_softc *sc)
   12061 {
   12062 	uint32_t dword;
   12063 
   12064 	/*
    12065 	 * Linux's code to decode the version is very strange, so we
    12066 	 * don't follow it and instead decode word 61 as the document
    12067 	 * describes.  Perhaps it's not perfect, though...
   12068 	 *
   12069 	 * Example:
   12070 	 *
   12071 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12072 	 */
   12073 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12074 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12075 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12076 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12077 }
   12078 
   12079 static void
   12080 wm_nvm_version(struct wm_softc *sc)
   12081 {
   12082 	uint16_t major, minor, build, patch;
   12083 	uint16_t uid0, uid1;
   12084 	uint16_t nvm_data;
   12085 	uint16_t off;
   12086 	bool check_version = false;
   12087 	bool check_optionrom = false;
   12088 	bool have_build = false;
   12089 
   12090 	/*
   12091 	 * Version format:
   12092 	 *
   12093 	 * XYYZ
   12094 	 * X0YZ
   12095 	 * X0YY
   12096 	 *
   12097 	 * Example:
   12098 	 *
   12099 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12100 	 *	82571	0x50a6	5.10.6?
   12101 	 *	82572	0x506a	5.6.10?
   12102 	 *	82572EI	0x5069	5.6.9?
   12103 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12104 	 *		0x2013	2.1.3?
    12105 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12106 	 */
   12107 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12108 	switch (sc->sc_type) {
   12109 	case WM_T_82571:
   12110 	case WM_T_82572:
   12111 	case WM_T_82574:
   12112 	case WM_T_82583:
   12113 		check_version = true;
   12114 		check_optionrom = true;
   12115 		have_build = true;
   12116 		break;
   12117 	case WM_T_82575:
   12118 	case WM_T_82576:
   12119 	case WM_T_82580:
   12120 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12121 			check_version = true;
   12122 		break;
   12123 	case WM_T_I211:
   12124 		wm_nvm_version_invm(sc);
   12125 		goto printver;
   12126 	case WM_T_I210:
   12127 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12128 			wm_nvm_version_invm(sc);
   12129 			goto printver;
   12130 		}
   12131 		/* FALLTHROUGH */
   12132 	case WM_T_I350:
   12133 	case WM_T_I354:
   12134 		check_version = true;
   12135 		check_optionrom = true;
   12136 		break;
   12137 	default:
   12138 		return;
   12139 	}
   12140 	if (check_version) {
   12141 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12142 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12143 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12144 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12145 			build = nvm_data & NVM_BUILD_MASK;
   12146 			have_build = true;
   12147 		} else
   12148 			minor = nvm_data & 0x00ff;
   12149 
    12150 		/* Convert the BCD-coded minor number to decimal. */
   12151 		minor = (minor / 16) * 10 + (minor % 16);
   12152 		sc->sc_nvm_ver_major = major;
   12153 		sc->sc_nvm_ver_minor = minor;
   12154 
   12155 printver:
   12156 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12157 		    sc->sc_nvm_ver_minor);
   12158 		if (have_build) {
   12159 			sc->sc_nvm_ver_build = build;
   12160 			aprint_verbose(".%d", build);
   12161 		}
   12162 	}
   12163 	if (check_optionrom) {
   12164 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12165 		/* Option ROM Version */
   12166 		if ((off != 0x0000) && (off != 0xffff)) {
   12167 			off += NVM_COMBO_VER_OFF;
   12168 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12169 			wm_nvm_read(sc, off, 1, &uid0);
   12170 			if ((uid0 != 0) && (uid0 != 0xffff)
   12171 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12172 				/* 16bits */
   12173 				major = uid0 >> 8;
   12174 				build = (uid0 << 8) | (uid1 >> 8);
   12175 				patch = uid1 & 0x00ff;
   12176 				aprint_verbose(", option ROM Version %d.%d.%d",
   12177 				    major, build, patch);
   12178 			}
   12179 		}
   12180 	}
   12181 
   12182 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12183 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12184 }
   12185 
   12186 /*
   12187  * wm_nvm_read:
   12188  *
   12189  *	Read data from the serial EEPROM.
   12190  */
   12191 static int
   12192 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12193 {
   12194 	int rv;
   12195 
   12196 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12197 		device_xname(sc->sc_dev), __func__));
   12198 
   12199 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12200 		return 1;
   12201 
   12202 	if (wm_nvm_acquire(sc))
   12203 		return 1;
   12204 
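          	/*
          	 * Dispatch on the NVM access method: ICH8/SPT flash, iNVM
          	 * (OTP), the EERD register, SPI bit-bang or Microwire
          	 * bit-bang.
          	 */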
   12205 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12206 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12207 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12208 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12209 	else if (sc->sc_type == WM_T_PCH_SPT)
   12210 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12211 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12212 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12213 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12214 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12215 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12216 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12217 	else
   12218 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12219 
   12220 	wm_nvm_release(sc);
   12221 	return rv;
   12222 }
   12223 
   12224 /*
   12225  * Hardware semaphores.
    12226  * Very complex...
   12227  */
   12228 
   12229 static int
   12230 wm_get_null(struct wm_softc *sc)
   12231 {
   12232 
   12233 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12234 		device_xname(sc->sc_dev), __func__));
   12235 	return 0;
   12236 }
   12237 
   12238 static void
   12239 wm_put_null(struct wm_softc *sc)
   12240 {
   12241 
   12242 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12243 		device_xname(sc->sc_dev), __func__));
   12244 	return;
   12245 }
   12246 
   12247 /*
   12248  * Get hardware semaphore.
   12249  * Same as e1000_get_hw_semaphore_generic()
   12250  */
   12251 static int
   12252 wm_get_swsm_semaphore(struct wm_softc *sc)
   12253 {
   12254 	int32_t timeout;
   12255 	uint32_t swsm;
   12256 
   12257 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12258 		device_xname(sc->sc_dev), __func__));
   12259 	KASSERT(sc->sc_nvm_wordsize > 0);
   12260 
   12261 	/* Get the SW semaphore. */
   12262 	timeout = sc->sc_nvm_wordsize + 1;
   12263 	while (timeout) {
   12264 		swsm = CSR_READ(sc, WMREG_SWSM);
   12265 
   12266 		if ((swsm & SWSM_SMBI) == 0)
   12267 			break;
   12268 
   12269 		delay(50);
   12270 		timeout--;
   12271 	}
   12272 
   12273 	if (timeout == 0) {
   12274 		aprint_error_dev(sc->sc_dev,
   12275 		    "could not acquire SWSM SMBI\n");
   12276 		return 1;
   12277 	}
   12278 
   12279 	/* Get the FW semaphore. */
   12280 	timeout = sc->sc_nvm_wordsize + 1;
   12281 	while (timeout) {
   12282 		swsm = CSR_READ(sc, WMREG_SWSM);
   12283 		swsm |= SWSM_SWESMBI;
   12284 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12285 		/* If we managed to set the bit we got the semaphore. */
   12286 		swsm = CSR_READ(sc, WMREG_SWSM);
   12287 		if (swsm & SWSM_SWESMBI)
   12288 			break;
   12289 
   12290 		delay(50);
   12291 		timeout--;
   12292 	}
   12293 
   12294 	if (timeout == 0) {
   12295 		aprint_error_dev(sc->sc_dev,
   12296 		    "could not acquire SWSM SWESMBI\n");
   12297 		/* Release semaphores */
   12298 		wm_put_swsm_semaphore(sc);
   12299 		return 1;
   12300 	}
   12301 	return 0;
   12302 }
   12303 
   12304 /*
   12305  * Put hardware semaphore.
   12306  * Same as e1000_put_hw_semaphore_generic()
   12307  */
   12308 static void
   12309 wm_put_swsm_semaphore(struct wm_softc *sc)
   12310 {
   12311 	uint32_t swsm;
   12312 
   12313 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12314 		device_xname(sc->sc_dev), __func__));
   12315 
   12316 	swsm = CSR_READ(sc, WMREG_SWSM);
   12317 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12318 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12319 }
   12320 
   12321 /*
   12322  * Get SW/FW semaphore.
   12323  * Same as e1000_acquire_swfw_sync_82575().
   12324  */
   12325 static int
   12326 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12327 {
   12328 	uint32_t swfw_sync;
   12329 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12330 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12331 	int timeout = 200;
   12332 
   12333 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12334 		device_xname(sc->sc_dev), __func__));
   12335 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12336 
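          	/*
          	 * Both driver and firmware advertise ownership through
          	 * SW_FW_SYNC; the resource is free only when neither the SW
          	 * nor the FW bit for this mask is set.
          	 */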
   12337 	for (timeout = 0; timeout < 200; timeout++) {
   12338 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12339 			if (wm_get_swsm_semaphore(sc)) {
   12340 				aprint_error_dev(sc->sc_dev,
   12341 				    "%s: failed to get semaphore\n",
   12342 				    __func__);
   12343 				return 1;
   12344 			}
   12345 		}
   12346 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12347 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12348 			swfw_sync |= swmask;
   12349 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12350 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12351 				wm_put_swsm_semaphore(sc);
   12352 			return 0;
   12353 		}
   12354 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12355 			wm_put_swsm_semaphore(sc);
   12356 		delay(5000);
   12357 	}
   12358 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12359 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12360 	return 1;
   12361 }
   12362 
   12363 static void
   12364 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12365 {
   12366 	uint32_t swfw_sync;
   12367 
   12368 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12369 		device_xname(sc->sc_dev), __func__));
   12370 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12371 
   12372 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12373 		while (wm_get_swsm_semaphore(sc) != 0)
   12374 			continue;
   12375 	}
   12376 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12377 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12378 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12379 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12380 		wm_put_swsm_semaphore(sc);
   12381 }
   12382 
   12383 static int
   12384 wm_get_phy_82575(struct wm_softc *sc)
   12385 {
   12386 
   12387 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12388 		device_xname(sc->sc_dev), __func__));
   12389 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12390 }
   12391 
   12392 static void
   12393 wm_put_phy_82575(struct wm_softc *sc)
   12394 {
   12395 
   12396 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12397 		device_xname(sc->sc_dev), __func__));
   12398 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12399 }
   12400 
   12401 static int
   12402 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12403 {
   12404 	uint32_t ext_ctrl;
   12405 	int timeout = 200;
   12406 
   12407 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12408 		device_xname(sc->sc_dev), __func__));
   12409 
   12410 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
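          	/*
          	 * Request ownership by setting MDIO_SW_OWNERSHIP; if the bit
          	 * reads back as set, the hardware has granted the semaphore.
          	 */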
   12411 	for (timeout = 0; timeout < 200; timeout++) {
   12412 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12413 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12414 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12415 
   12416 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12417 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12418 			return 0;
   12419 		delay(5000);
   12420 	}
   12421 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12422 	    device_xname(sc->sc_dev), ext_ctrl);
   12423 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12424 	return 1;
   12425 }
   12426 
   12427 static void
   12428 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12429 {
   12430 	uint32_t ext_ctrl;
   12431 
   12432 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12433 		device_xname(sc->sc_dev), __func__));
   12434 
   12435 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12436 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12437 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12438 
   12439 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12440 }
   12441 
   12442 static int
   12443 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12444 {
   12445 	uint32_t ext_ctrl;
   12446 	int timeout;
   12447 
   12448 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12449 		device_xname(sc->sc_dev), __func__));
   12450 	mutex_enter(sc->sc_ich_phymtx);
   12451 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12452 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12453 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12454 			break;
   12455 		delay(1000);
   12456 	}
   12457 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12458 		printf("%s: SW has already locked the resource\n",
   12459 		    device_xname(sc->sc_dev));
   12460 		goto out;
   12461 	}
   12462 
   12463 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12464 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12465 	for (timeout = 0; timeout < 1000; timeout++) {
   12466 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12467 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12468 			break;
   12469 		delay(1000);
   12470 	}
   12471 	if (timeout >= 1000) {
   12472 		printf("%s: failed to acquire semaphore\n",
   12473 		    device_xname(sc->sc_dev));
   12474 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12475 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12476 		goto out;
   12477 	}
   12478 	return 0;
   12479 
   12480 out:
   12481 	mutex_exit(sc->sc_ich_phymtx);
   12482 	return 1;
   12483 }
   12484 
   12485 static void
   12486 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12487 {
   12488 	uint32_t ext_ctrl;
   12489 
   12490 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12491 		device_xname(sc->sc_dev), __func__));
   12492 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12493 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12494 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12495 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12496 	} else {
   12497 		printf("%s: Semaphore unexpectedly released\n",
   12498 		    device_xname(sc->sc_dev));
   12499 	}
   12500 
   12501 	mutex_exit(sc->sc_ich_phymtx);
   12502 }
   12503 
   12504 static int
   12505 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12506 {
   12507 
   12508 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12509 		device_xname(sc->sc_dev), __func__));
   12510 	mutex_enter(sc->sc_ich_nvmmtx);
   12511 
   12512 	return 0;
   12513 }
   12514 
   12515 static void
   12516 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12517 {
   12518 
   12519 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12520 		device_xname(sc->sc_dev), __func__));
   12521 	mutex_exit(sc->sc_ich_nvmmtx);
   12522 }
   12523 
   12524 static int
   12525 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12526 {
   12527 	int i = 0;
   12528 	uint32_t reg;
   12529 
   12530 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12531 		device_xname(sc->sc_dev), __func__));
   12532 
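          	/* Poll for MDIO ownership every 2ms, up to the timeout. */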
   12533 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12534 	do {
   12535 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12536 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12537 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12538 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12539 			break;
   12540 		delay(2*1000);
   12541 		i++;
   12542 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12543 
   12544 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12545 		wm_put_hw_semaphore_82573(sc);
   12546 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12547 		    device_xname(sc->sc_dev));
   12548 		return -1;
   12549 	}
   12550 
   12551 	return 0;
   12552 }
   12553 
   12554 static void
   12555 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12556 {
   12557 	uint32_t reg;
   12558 
   12559 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12560 		device_xname(sc->sc_dev), __func__));
   12561 
   12562 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12563 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12564 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12565 }
   12566 
   12567 /*
   12568  * Management mode and power management related subroutines.
   12569  * BMC, AMT, suspend/resume and EEE.
   12570  */
   12571 
   12572 #ifdef WM_WOL
   12573 static int
   12574 wm_check_mng_mode(struct wm_softc *sc)
   12575 {
   12576 	int rv;
   12577 
   12578 	switch (sc->sc_type) {
   12579 	case WM_T_ICH8:
   12580 	case WM_T_ICH9:
   12581 	case WM_T_ICH10:
   12582 	case WM_T_PCH:
   12583 	case WM_T_PCH2:
   12584 	case WM_T_PCH_LPT:
   12585 	case WM_T_PCH_SPT:
   12586 		rv = wm_check_mng_mode_ich8lan(sc);
   12587 		break;
   12588 	case WM_T_82574:
   12589 	case WM_T_82583:
   12590 		rv = wm_check_mng_mode_82574(sc);
   12591 		break;
   12592 	case WM_T_82571:
   12593 	case WM_T_82572:
   12594 	case WM_T_82573:
   12595 	case WM_T_80003:
   12596 		rv = wm_check_mng_mode_generic(sc);
   12597 		break;
   12598 	default:
    12599 		/* nothing to do */
   12600 		rv = 0;
   12601 		break;
   12602 	}
   12603 
   12604 	return rv;
   12605 }
   12606 
   12607 static int
   12608 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12609 {
   12610 	uint32_t fwsm;
   12611 
   12612 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12613 
   12614 	if (((fwsm & FWSM_FW_VALID) != 0)
   12615 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12616 		return 1;
   12617 
   12618 	return 0;
   12619 }
   12620 
   12621 static int
   12622 wm_check_mng_mode_82574(struct wm_softc *sc)
   12623 {
   12624 	uint16_t data;
   12625 
   12626 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12627 
   12628 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12629 		return 1;
   12630 
   12631 	return 0;
   12632 }
   12633 
   12634 static int
   12635 wm_check_mng_mode_generic(struct wm_softc *sc)
   12636 {
   12637 	uint32_t fwsm;
   12638 
   12639 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12640 
   12641 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12642 		return 1;
   12643 
   12644 	return 0;
   12645 }
   12646 #endif /* WM_WOL */
   12647 
   12648 static int
   12649 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12650 {
   12651 	uint32_t manc, fwsm, factps;
   12652 
   12653 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12654 		return 0;
   12655 
   12656 	manc = CSR_READ(sc, WMREG_MANC);
   12657 
   12658 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12659 		device_xname(sc->sc_dev), manc));
   12660 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12661 		return 0;
   12662 
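          	/*
          	 * Pass-through is enabled when the firmware runs in IAMT
          	 * mode with manageability clock gating off, when the NVM on
          	 * 82574/82583 requests pass-through, or, on older parts,
          	 * when SMBus management is on and ASF is off.
          	 */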
   12663 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12664 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12665 		factps = CSR_READ(sc, WMREG_FACTPS);
   12666 		if (((factps & FACTPS_MNGCG) == 0)
   12667 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12668 			return 1;
    12669 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   12670 		uint16_t data;
   12671 
   12672 		factps = CSR_READ(sc, WMREG_FACTPS);
   12673 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12674 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12675 			device_xname(sc->sc_dev), factps, data));
   12676 		if (((factps & FACTPS_MNGCG) == 0)
   12677 		    && ((data & NVM_CFG2_MNGM_MASK)
   12678 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12679 			return 1;
   12680 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12681 	    && ((manc & MANC_ASF_EN) == 0))
   12682 		return 1;
   12683 
   12684 	return 0;
   12685 }
   12686 
   12687 static bool
   12688 wm_phy_resetisblocked(struct wm_softc *sc)
   12689 {
   12690 	bool blocked = false;
   12691 	uint32_t reg;
   12692 	int i = 0;
   12693 
   12694 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12695 		device_xname(sc->sc_dev), __func__));
   12696 
   12697 	switch (sc->sc_type) {
   12698 	case WM_T_ICH8:
   12699 	case WM_T_ICH9:
   12700 	case WM_T_ICH10:
   12701 	case WM_T_PCH:
   12702 	case WM_T_PCH2:
   12703 	case WM_T_PCH_LPT:
   12704 	case WM_T_PCH_SPT:
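          		/*
          		 * FWSM_RSPCIPHY is set when the firmware allows a PHY
          		 * reset; poll it for up to 30 * 10ms before deciding
          		 * the reset is blocked.
          		 */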
   12705 		do {
   12706 			reg = CSR_READ(sc, WMREG_FWSM);
   12707 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12708 				blocked = true;
   12709 				delay(10*1000);
   12710 				continue;
   12711 			}
   12712 			blocked = false;
   12713 		} while (blocked && (i++ < 30));
   12714 		return blocked;
   12716 	case WM_T_82571:
   12717 	case WM_T_82572:
   12718 	case WM_T_82573:
   12719 	case WM_T_82574:
   12720 	case WM_T_82583:
   12721 	case WM_T_80003:
   12722 		reg = CSR_READ(sc, WMREG_MANC);
   12723 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12724 			return true;
   12725 		else
   12726 			return false;
   12728 	default:
   12729 		/* no problem */
   12730 		break;
   12731 	}
   12732 
   12733 	return false;
   12734 }
   12735 
   12736 static void
   12737 wm_get_hw_control(struct wm_softc *sc)
   12738 {
   12739 	uint32_t reg;
   12740 
   12741 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12742 		device_xname(sc->sc_dev), __func__));
   12743 
   12744 	if (sc->sc_type == WM_T_82573) {
   12745 		reg = CSR_READ(sc, WMREG_SWSM);
   12746 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12747 	} else if (sc->sc_type >= WM_T_82571) {
   12748 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12749 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12750 	}
   12751 }
   12752 
   12753 static void
   12754 wm_release_hw_control(struct wm_softc *sc)
   12755 {
   12756 	uint32_t reg;
   12757 
   12758 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12759 		device_xname(sc->sc_dev), __func__));
   12760 
   12761 	if (sc->sc_type == WM_T_82573) {
   12762 		reg = CSR_READ(sc, WMREG_SWSM);
   12763 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12764 	} else if (sc->sc_type >= WM_T_82571) {
   12765 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12766 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12767 	}
   12768 }
   12769 
   12770 static void
   12771 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12772 {
   12773 	uint32_t reg;
   12774 
   12775 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12776 		device_xname(sc->sc_dev), __func__));
   12777 
   12778 	if (sc->sc_type < WM_T_PCH2)
   12779 		return;
   12780 
   12781 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12782 
   12783 	if (gate)
   12784 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12785 	else
   12786 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12787 
   12788 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12789 }
   12790 
   12791 static void
   12792 wm_smbustopci(struct wm_softc *sc)
   12793 {
   12794 	uint32_t fwsm, reg;
   12795 	int rv = 0;
   12796 
   12797 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12798 		device_xname(sc->sc_dev), __func__));
   12799 
   12800 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12801 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12802 
   12803 	/* Disable ULP */
   12804 	wm_ulp_disable(sc);
   12805 
   12806 	/* Acquire PHY semaphore */
   12807 	sc->phy.acquire(sc);
   12808 
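          	/*
          	 * The PHY may power up in SMBus mode; the steps below try to
          	 * switch it to MDIO (PCI) mode, toggling LANPHYPC to reset
          	 * the PHY where needed.
          	 */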
   12809 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12810 	switch (sc->sc_type) {
   12811 	case WM_T_PCH_LPT:
   12812 	case WM_T_PCH_SPT:
   12813 		if (wm_phy_is_accessible_pchlan(sc))
   12814 			break;
   12815 
   12816 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12817 		reg |= CTRL_EXT_FORCE_SMBUS;
   12818 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12819 #if 0
   12820 		/* XXX Isn't this required??? */
   12821 		CSR_WRITE_FLUSH(sc);
   12822 #endif
   12823 		delay(50 * 1000);
   12824 		/* FALLTHROUGH */
   12825 	case WM_T_PCH2:
   12826 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12827 			break;
   12828 		/* FALLTHROUGH */
   12829 	case WM_T_PCH:
   12830 		if (sc->sc_type == WM_T_PCH)
   12831 			if ((fwsm & FWSM_FW_VALID) != 0)
   12832 				break;
   12833 
   12834 		if (wm_phy_resetisblocked(sc) == true) {
   12835 			printf("XXX reset is blocked(3)\n");
   12836 			break;
   12837 		}
   12838 
   12839 		wm_toggle_lanphypc_pch_lpt(sc);
   12840 
   12841 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12842 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12843 				break;
   12844 
   12845 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12846 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12847 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12848 
   12849 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12850 				break;
   12851 			rv = -1;
   12852 		}
   12853 		break;
   12854 	default:
   12855 		break;
   12856 	}
   12857 
   12858 	/* Release semaphore */
   12859 	sc->phy.release(sc);
   12860 
   12861 	if (rv == 0) {
   12862 		if (wm_phy_resetisblocked(sc)) {
   12863 			printf("XXX reset is blocked(4)\n");
   12864 			goto out;
   12865 		}
   12866 		wm_reset_phy(sc);
   12867 		if (wm_phy_resetisblocked(sc))
   12868 			printf("XXX reset is blocked(4)\n");
   12869 	}
   12870 
   12871 out:
   12872 	/*
   12873 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12874 	 */
   12875 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12876 		delay(10*1000);
   12877 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12878 	}
   12879 }
   12880 
   12881 static void
   12882 wm_init_manageability(struct wm_softc *sc)
   12883 {
   12884 
   12885 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12886 		device_xname(sc->sc_dev), __func__));
   12887 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12888 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12889 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12890 
   12891 		/* Disable hardware interception of ARP */
   12892 		manc &= ~MANC_ARP_EN;
   12893 
   12894 		/* Enable receiving management packets to the host */
   12895 		if (sc->sc_type >= WM_T_82571) {
   12896 			manc |= MANC_EN_MNG2HOST;
    12897 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12898 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12899 		}
   12900 
   12901 		CSR_WRITE(sc, WMREG_MANC, manc);
   12902 	}
   12903 }
   12904 
   12905 static void
   12906 wm_release_manageability(struct wm_softc *sc)
   12907 {
   12908 
   12909 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12910 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12911 
   12912 		manc |= MANC_ARP_EN;
   12913 		if (sc->sc_type >= WM_T_82571)
   12914 			manc &= ~MANC_EN_MNG2HOST;
   12915 
   12916 		CSR_WRITE(sc, WMREG_MANC, manc);
   12917 	}
   12918 }
   12919 
   12920 static void
   12921 wm_get_wakeup(struct wm_softc *sc)
   12922 {
   12923 
   12924 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12925 	switch (sc->sc_type) {
   12926 	case WM_T_82573:
   12927 	case WM_T_82583:
   12928 		sc->sc_flags |= WM_F_HAS_AMT;
   12929 		/* FALLTHROUGH */
   12930 	case WM_T_80003:
   12931 	case WM_T_82575:
   12932 	case WM_T_82576:
   12933 	case WM_T_82580:
   12934 	case WM_T_I350:
   12935 	case WM_T_I354:
   12936 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12937 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12938 		/* FALLTHROUGH */
   12939 	case WM_T_82541:
   12940 	case WM_T_82541_2:
   12941 	case WM_T_82547:
   12942 	case WM_T_82547_2:
   12943 	case WM_T_82571:
   12944 	case WM_T_82572:
   12945 	case WM_T_82574:
   12946 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12947 		break;
   12948 	case WM_T_ICH8:
   12949 	case WM_T_ICH9:
   12950 	case WM_T_ICH10:
   12951 	case WM_T_PCH:
   12952 	case WM_T_PCH2:
   12953 	case WM_T_PCH_LPT:
   12954 	case WM_T_PCH_SPT:
   12955 		sc->sc_flags |= WM_F_HAS_AMT;
   12956 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12957 		break;
   12958 	default:
   12959 		break;
   12960 	}
   12961 
   12962 	/* 1: HAS_MANAGE */
   12963 	if (wm_enable_mng_pass_thru(sc) != 0)
   12964 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12965 
   12966 #ifdef WM_DEBUG
   12967 	printf("\n");
   12968 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12969 		printf("HAS_AMT,");
   12970 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12971 		printf("ARC_SUBSYS_VALID,");
   12972 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12973 		printf("ASF_FIRMWARE_PRES,");
   12974 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12975 		printf("HAS_MANAGE,");
   12976 	printf("\n");
   12977 #endif
   12978 	/*
    12979 	 * Note that the WOL flags are set after the EEPROM settings
    12980 	 * have been reset.
   12981 	 */
   12982 }
   12983 
   12984 /*
   12985  * Unconfigure Ultra Low Power mode.
   12986  * Only for I217 and newer (see below).
   12987  */
   12988 static void
   12989 wm_ulp_disable(struct wm_softc *sc)
   12990 {
   12991 	uint32_t reg;
   12992 	int i = 0;
   12993 
   12994 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12995 		device_xname(sc->sc_dev), __func__));
    12996 	/* Exclude old devices and variants that don't need this */
   12997 	if ((sc->sc_type < WM_T_PCH_LPT)
   12998 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12999 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13000 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13001 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13002 		return;
   13003 
   13004 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13005 		/* Request ME un-configure ULP mode in the PHY */
   13006 		reg = CSR_READ(sc, WMREG_H2ME);
   13007 		reg &= ~H2ME_ULP;
   13008 		reg |= H2ME_ENFORCE_SETTINGS;
   13009 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13010 
   13011 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13012 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13013 			if (i++ == 30) {
   13014 				printf("%s timed out\n", __func__);
   13015 				return;
   13016 			}
   13017 			delay(10 * 1000);
   13018 		}
   13019 		reg = CSR_READ(sc, WMREG_H2ME);
   13020 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13021 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13022 
   13023 		return;
   13024 	}
   13025 
   13026 	/* Acquire semaphore */
   13027 	sc->phy.acquire(sc);
   13028 
   13029 	/* Toggle LANPHYPC */
   13030 	wm_toggle_lanphypc_pch_lpt(sc);
   13031 
   13032 	/* Unforce SMBus mode in PHY */
   13033 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13034 	if (reg == 0x0000 || reg == 0xffff) {
   13035 		uint32_t reg2;
   13036 
   13037 		printf("%s: Force SMBus first.\n", __func__);
   13038 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13039 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13040 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13041 		delay(50 * 1000);
   13042 
   13043 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13044 	}
   13045 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13046 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13047 
   13048 	/* Unforce SMBus mode in MAC */
   13049 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13050 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13051 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13052 
   13053 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13054 	reg |= HV_PM_CTRL_K1_ENA;
   13055 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13056 
   13057 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13058 	reg &= ~(I218_ULP_CONFIG1_IND
   13059 	    | I218_ULP_CONFIG1_STICKY_ULP
   13060 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13061 	    | I218_ULP_CONFIG1_WOL_HOST
   13062 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13063 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13064 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13065 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13066 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
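	/*
	 * Per other drivers, setting the START bit commits the new
	 * (cleared) ULP configuration.
	 */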
   13067 	reg |= I218_ULP_CONFIG1_START;
   13068 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13069 
   13070 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13071 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13072 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13073 
   13074 	/* Release semaphore */
   13075 	sc->phy.release(sc);
   13076 	wm_gmii_reset(sc);
   13077 	delay(50 * 1000);
   13078 }
   13079 
   13080 /* WOL in the newer chipset interfaces (pchlan) */
   13081 static void
   13082 wm_enable_phy_wakeup(struct wm_softc *sc)
   13083 {
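	/* XXX Not implemented yet; the intended steps are outlined below. */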
   13084 #if 0
   13085 	uint16_t preg;
   13086 
   13087 	/* Copy MAC RARs to PHY RARs */
   13088 
   13089 	/* Copy MAC MTA to PHY MTA */
   13090 
   13091 	/* Configure PHY Rx Control register */
   13092 
   13093 	/* Enable PHY wakeup in MAC register */
   13094 
   13095 	/* Configure and enable PHY wakeup in PHY registers */
   13096 
   13097 	/* Activate PHY wakeup */
   13098 
   13099 	/* XXX */
   13100 #endif
   13101 }
   13102 
   13103 /* Power down workaround on D3 */
   13104 static void
   13105 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13106 {
   13107 	uint32_t reg;
   13108 	int i;
   13109 
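	/*
	 * Try at most twice: if the VR power-down mode does not stick,
	 * reset the PHY and repeat once.
	 */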
   13110 	for (i = 0; i < 2; i++) {
   13111 		/* Disable link */
   13112 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13113 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13114 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13115 
   13116 		/*
   13117 		 * Call gig speed drop workaround on Gig disable before
   13118 		 * accessing any PHY registers
   13119 		 */
   13120 		if (sc->sc_type == WM_T_ICH8)
   13121 			wm_gig_downshift_workaround_ich8lan(sc);
   13122 
   13123 		/* Write VR power-down enable */
   13124 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13125 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13126 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13127 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13128 
   13129 		/* Read it back and test */
   13130 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13131 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13132 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13133 			break;
   13134 
   13135 		/* Issue PHY reset and repeat at most one more time */
   13136 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13137 	}
   13138 }
   13139 
   13140 static void
   13141 wm_enable_wakeup(struct wm_softc *sc)
   13142 {
   13143 	uint32_t reg, pmreg;
   13144 	pcireg_t pmode;
   13145 
   13146 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13147 		device_xname(sc->sc_dev), __func__));
   13148 
   13149 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13150 		&pmreg, NULL) == 0)
   13151 		return;
   13152 
   13153 	/* Advertise the wakeup capability */
   13154 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13155 	    | CTRL_SWDPIN(3));
   13156 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13157 
   13158 	/* ICH workaround */
   13159 	switch (sc->sc_type) {
   13160 	case WM_T_ICH8:
   13161 	case WM_T_ICH9:
   13162 	case WM_T_ICH10:
   13163 	case WM_T_PCH:
   13164 	case WM_T_PCH2:
   13165 	case WM_T_PCH_LPT:
   13166 	case WM_T_PCH_SPT:
   13167 		/* Disable gig during WOL */
   13168 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13169 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13170 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13171 		if (sc->sc_type == WM_T_PCH)
   13172 			wm_gmii_reset(sc);
   13173 
   13174 		/* Power down workaround */
   13175 		if (sc->sc_phytype == WMPHY_82577) {
   13176 			struct mii_softc *child;
   13177 
   13178 			/* Assume that the PHY is copper */
   13179 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
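			/*
			 * XXX The magic value comes from other drivers;
			 * (768 << 5) | 25 presumably selects PHY page 768,
			 * register 25.
			 */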
   13180 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13181 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13182 				    (768 << 5) | 25, 0x0444); /* magic num */
   13183 		}
   13184 		break;
   13185 	default:
   13186 		break;
   13187 	}
   13188 
   13189 	/* Keep the laser running on fiber adapters */
   13190 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13191 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13192 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13193 		reg |= CTRL_EXT_SWDPIN(3);
   13194 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13195 	}
   13196 
   13197 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13198 #if 0	/* for the multicast packet */
   13199 	reg |= WUFC_MC;
   13200 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13201 #endif
   13202 
   13203 	if (sc->sc_type >= WM_T_PCH)
   13204 		wm_enable_phy_wakeup(sc);
   13205 	else {
   13206 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13207 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13208 	}
   13209 
   13210 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13211 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13212 		|| (sc->sc_type == WM_T_PCH2))
   13213 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13214 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13215 
   13216 	/* Request PME */
   13217 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13218 #if 0
   13219 	/* Disable WOL */
   13220 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13221 #else
   13222 	/* For WOL */
   13223 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13224 #endif
   13225 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13226 }
   13227 
/* LPLU (Low Power Link Up) */
   13229 
   13230 static void
   13231 wm_lplu_d0_disable(struct wm_softc *sc)
   13232 {
   13233 	uint32_t reg;
   13234 
   13235 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13236 		device_xname(sc->sc_dev), __func__));
   13237 
   13238 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13239 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13240 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13241 }
   13242 
   13243 static void
   13244 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13245 {
   13246 	uint32_t reg;
   13247 
   13248 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13249 		device_xname(sc->sc_dev), __func__));
   13250 
   13251 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13252 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13253 	reg |= HV_OEM_BITS_ANEGNOW;
   13254 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13255 }
   13256 
   13257 /* EEE */
   13258 
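/*
 * Enable or disable Energy Efficient Ethernet and Low Power Idle on
 * I350 family devices, according to the WM_F_EEE flag.
 */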
   13259 static void
   13260 wm_set_eee_i350(struct wm_softc *sc)
   13261 {
   13262 	uint32_t ipcnfg, eeer;
   13263 
   13264 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13265 	eeer = CSR_READ(sc, WMREG_EEER);
   13266 
   13267 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13268 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13269 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13270 		    | EEER_LPI_FC);
   13271 	} else {
   13272 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13273 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13274 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13275 		    | EEER_LPI_FC);
   13276 	}
   13277 
   13278 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13279 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13280 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13281 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13282 }
   13283 
   13284 /*
   13285  * Workarounds (mainly PHY related).
   13286  * Basically, PHY's workarounds are in the PHY drivers.
   13287  */
   13288 
   13289 /* Work-around for 82566 Kumeran PCS lock loss */
   13290 static void
   13291 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13292 {
   13293 #if 0
   13294 	int miistatus, active, i;
   13295 	int reg;
   13296 
   13297 	miistatus = sc->sc_mii.mii_media_status;
   13298 
   13299 	/* If the link is not up, do nothing */
   13300 	if ((miistatus & IFM_ACTIVE) == 0)
   13301 		return;
   13302 
   13303 	active = sc->sc_mii.mii_media_active;
   13304 
   13305 	/* Nothing to do if the link is other than 1Gbps */
   13306 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13307 		return;
   13308 
   13309 	for (i = 0; i < 10; i++) {
   13310 		/* read twice */
   13311 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13312 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13313 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13314 			goto out;	/* GOOD! */
   13315 
   13316 		/* Reset the PHY */
   13317 		wm_gmii_reset(sc);
   13318 		delay(5*1000);
   13319 	}
   13320 
   13321 	/* Disable GigE link negotiation */
   13322 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13323 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13324 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13325 
   13326 	/*
   13327 	 * Call gig speed drop workaround on Gig disable before accessing
   13328 	 * any PHY registers.
   13329 	 */
   13330 	wm_gig_downshift_workaround_ich8lan(sc);
   13331 
   13332 out:
   13333 	return;
   13334 #endif
   13335 }
   13336 
   13337 /* WOL from S5 stops working */
   13338 static void
   13339 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13340 {
   13341 	uint16_t kmrn_reg;
   13342 
   13343 	/* Only for igp3 */
   13344 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13345 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13346 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13347 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13348 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13349 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13350 	}
   13351 }
   13352 
/*
 * Workarounds for the PCH PHYs.
 * XXX should be moved to a new PHY driver?
 */
   13357 static void
   13358 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13359 {
   13360 
   13361 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13362 		device_xname(sc->sc_dev), __func__));
   13363 	KASSERT(sc->sc_type == WM_T_PCH);
   13364 
   13365 	if (sc->sc_phytype == WMPHY_82577)
   13366 		wm_set_mdio_slow_mode_hv(sc);
   13367 
	/* XXX Not implemented: (PCH rev. 2) && (82577 && (PHY rev 2 or 3)) */

	/* XXX Not implemented: (82577 && (PHY rev 1 or 2)) || (82578 && PHY rev 1) */
   13371 
   13372 	/* 82578 */
   13373 	if (sc->sc_phytype == WMPHY_82578) {
   13374 		struct mii_softc *child;
   13375 
   13376 		/*
   13377 		 * Return registers to default by doing a soft reset then
   13378 		 * writing 0x3140 to the control register
   13379 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13380 		 */
   13381 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13382 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13383 			PHY_RESET(child);
   13384 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13385 			    0x3140);
   13386 		}
   13387 	}
   13388 
   13389 	/* Select page 0 */
   13390 	sc->phy.acquire(sc);
   13391 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13392 	sc->phy.release(sc);
   13393 
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled if the link comes up at 1Gbps.
	 */
   13398 	wm_k1_gig_workaround_hv(sc, 1);
   13399 }
   13400 
   13401 static void
   13402 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13403 {
   13404 
   13405 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13406 		device_xname(sc->sc_dev), __func__));
   13407 	KASSERT(sc->sc_type == WM_T_PCH2);
   13408 
   13409 	wm_set_mdio_slow_mode_hv(sc);
   13410 }
   13411 
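/*
 * Configure K1 (a Kumeran power-save state) based on the link state:
 * keep K1 disabled while the link is up, otherwise use the NVM default.
 */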
   13412 static int
   13413 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13414 {
   13415 	int k1_enable = sc->sc_nvm_k1_enabled;
   13416 
   13417 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13418 		device_xname(sc->sc_dev), __func__));
   13419 
   13420 	if (sc->phy.acquire(sc) != 0)
   13421 		return -1;
   13422 
   13423 	if (link) {
   13424 		k1_enable = 0;
   13425 
   13426 		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
   13428 	} else {
   13429 		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
   13431 	}
   13432 
   13433 	wm_configure_k1_ich8lan(sc, k1_enable);
   13434 	sc->phy.release(sc);
   13435 
   13436 	return 0;
   13437 }
   13438 
   13439 static void
   13440 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13441 {
   13442 	uint32_t reg;
   13443 
   13444 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13445 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13446 	    reg | HV_KMRN_MDIO_SLOW);
   13447 }
   13448 
   13449 static void
   13450 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13451 {
   13452 	uint32_t ctrl, ctrl_ext, tmp;
   13453 	uint16_t kmrn_reg;
   13454 
   13455 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13456 
   13457 	if (k1_enable)
   13458 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13459 	else
   13460 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13461 
   13462 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13463 
   13464 	delay(20);
   13465 
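	/*
	 * Briefly force the MAC speed setting (with speed bypass) so the
	 * K1 change takes effect, then restore the original values.
	 */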
   13466 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13467 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13468 
   13469 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13470 	tmp |= CTRL_FRCSPD;
   13471 
   13472 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13473 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13474 	CSR_WRITE_FLUSH(sc);
   13475 	delay(20);
   13476 
   13477 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13478 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13479 	CSR_WRITE_FLUSH(sc);
   13480 	delay(20);
   13481 }
   13482 
/* Special case: the 82575 needs to do this init manually ... */
   13484 static void
   13485 wm_reset_init_script_82575(struct wm_softc *sc)
   13486 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
   13491 
   13492 	/* SerDes configuration via SERDESCTRL */
   13493 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13494 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13495 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13496 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13497 
   13498 	/* CCM configuration via CCMCTL register */
   13499 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13500 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13501 
   13502 	/* PCIe lanes configuration */
   13503 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13504 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13505 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13506 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13507 
   13508 	/* PCIe PLL Configuration */
   13509 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13510 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13511 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13512 }
   13513 
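/*
 * Set the MDIO routing bits (external/shared MDIO) in MDICNFG from the
 * CFG3 word in the NVM, presumably to restore them after a reset.
 * Only relevant when SGMII is in use.
 */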
   13514 static void
   13515 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13516 {
   13517 	uint32_t reg;
   13518 	uint16_t nvmword;
   13519 	int rv;
   13520 
   13521 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13522 		return;
   13523 
   13524 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13525 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13526 	if (rv != 0) {
   13527 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13528 		    __func__);
   13529 		return;
   13530 	}
   13531 
   13532 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13533 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13534 		reg |= MDICNFG_DEST;
   13535 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13536 		reg |= MDICNFG_COM_MDIO;
   13537 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13538 }
   13539 
   13540 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13541 
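/*
 * Check whether the PHY answers with a valid ID.  Retries in MDIO slow
 * mode on pre-LPT parts; on LPT/SPT, unforces SMBus mode once the PHY
 * turns out to be reachable.  Appears to expect the caller to hold
 * sc->phy (note the release/acquire dance around slow mode).
 */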
   13542 static bool
   13543 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13544 {
   13545 	int i;
   13546 	uint32_t reg;
   13547 	uint16_t id1, id2;
   13548 
   13549 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13550 		device_xname(sc->sc_dev), __func__));
   13551 	id1 = id2 = 0xffff;
   13552 	for (i = 0; i < 2; i++) {
   13553 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13554 		if (MII_INVALIDID(id1))
   13555 			continue;
   13556 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13557 		if (MII_INVALIDID(id2))
   13558 			continue;
   13559 		break;
   13560 	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
   13564 
   13565 	if (sc->sc_type < WM_T_PCH_LPT) {
   13566 		sc->phy.release(sc);
   13567 		wm_set_mdio_slow_mode_hv(sc);
   13568 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13569 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13570 		sc->phy.acquire(sc);
   13571 	}
   13572 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		printf("%s: XXX PHY ID is still invalid\n",
		    device_xname(sc->sc_dev));
   13574 		return false;
   13575 	}
   13576 out:
   13577 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13578 		/* Only unforce SMBus if ME is not active */
   13579 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13580 			/* Unforce SMBus mode in PHY */
   13581 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13582 			    CV_SMB_CTRL);
   13583 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13584 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13585 			    CV_SMB_CTRL, reg);
   13586 
   13587 			/* Unforce SMBus mode in MAC */
   13588 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13589 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13590 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13591 		}
   13592 	}
   13593 	return true;
   13594 }
   13595 
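/*
 * Toggle the LANPHYPC pin value; per other drivers this fully
 * power-cycles the PHY.
 */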
   13596 static void
   13597 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13598 {
   13599 	uint32_t reg;
   13600 	int i;
   13601 
   13602 	/* Set PHY Config Counter to 50msec */
   13603 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13604 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13605 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13606 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13607 
   13608 	/* Toggle LANPHYPC */
   13609 	reg = CSR_READ(sc, WMREG_CTRL);
   13610 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13611 	reg &= ~CTRL_LANPHYPC_VALUE;
   13612 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13613 	CSR_WRITE_FLUSH(sc);
   13614 	delay(1000);
   13615 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13616 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13617 	CSR_WRITE_FLUSH(sc);
   13618 
   13619 	if (sc->sc_type < WM_T_PCH_LPT)
   13620 		delay(50 * 1000);
   13621 	else {
   13622 		i = 20;
   13623 
   13624 		do {
   13625 			delay(5 * 1000);
   13626 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13627 		    && i--);
   13628 
   13629 		delay(30 * 1000);
   13630 	}
   13631 }
   13632 
   13633 static int
   13634 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13635 {
   13636 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13637 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13638 	uint32_t rxa;
   13639 	uint16_t scale = 0, lat_enc = 0;
   13640 	int64_t lat_ns, value;
   13641 
   13642 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13643 		device_xname(sc->sc_dev), __func__));
   13644 
   13645 	if (link) {
   13646 		pcireg_t preg;
   13647 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13648 
   13649 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13650 
   13651 		/*
   13652 		 * Determine the maximum latency tolerated by the device.
   13653 		 *
   13654 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13655 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13656 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13657 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13658 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13659 		 */
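		/*
		 * rxa is in KB.  Take the buffer size less twice the MTU,
		 * convert to bits, and divide by the link speed in Mb/s;
		 * the factor of 1000 makes the result nanoseconds.
		 */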
   13660 		lat_ns = ((int64_t)rxa * 1024 -
   13661 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13662 		if (lat_ns < 0)
   13663 			lat_ns = 0;
   13664 		else {
   13665 			uint32_t status;
   13666 			uint16_t speed;
   13667 
   13668 			status = CSR_READ(sc, WMREG_STATUS);
   13669 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13670 			case STATUS_SPEED_10:
   13671 				speed = 10;
   13672 				break;
   13673 			case STATUS_SPEED_100:
   13674 				speed = 100;
   13675 				break;
   13676 			case STATUS_SPEED_1000:
   13677 				speed = 1000;
   13678 				break;
   13679 			default:
   13680 				printf("%s: Unknown speed (status = %08x)\n",
   13681 				    device_xname(sc->sc_dev), status);
   13682 				return -1;
   13683 			}
   13684 			lat_ns /= speed;
   13685 		}
   13686 		value = lat_ns;
   13687 
   13688 		while (value > LTRV_VALUE) {
			scale++;
   13690 			value = howmany(value, __BIT(5));
   13691 		}
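		/*
		 * Worked example (assuming LTRV_VALUE is the 10-bit value
		 * mask): lat_ns = 100000 gives scale = 2, value = 98, i.e.
		 * an encoded latency of 98 * 2^10 ns (~100.4 usec).
		 */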
   13692 		if (scale > LTRV_SCALE_MAX) {
   13693 			printf("%s: Invalid LTR latency scale %d\n",
   13694 			    device_xname(sc->sc_dev), scale);
   13695 			return -1;
   13696 		}
   13697 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13698 
   13699 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13700 		    WM_PCI_LTR_CAP_LPT);
   13701 		max_snoop = preg & 0xffff;
   13702 		max_nosnoop = preg >> 16;
   13703 
   13704 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13705 
   13706 		if (lat_enc > max_ltr_enc) {
   13707 			lat_enc = max_ltr_enc;
   13708 		}
   13709 	}
	/* Use the same encoded latency for Snoop and No-Snoop */
   13711 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13712 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13713 
   13714 	return 0;
   13715 }
   13716 
   13717 /*
   13718  * I210 Errata 25 and I211 Errata 10
   13719  * Slow System Clock.
   13720  */
   13721 static void
   13722 wm_pll_workaround_i210(struct wm_softc *sc)
   13723 {
   13724 	uint32_t mdicnfg, wuc;
   13725 	uint32_t reg;
   13726 	pcireg_t pcireg;
   13727 	uint32_t pmreg;
   13728 	uint16_t nvmword, tmp_nvmword;
   13729 	int phyval;
   13730 	bool wa_done = false;
   13731 	int i;
   13732 
   13733 	/* Save WUC and MDICNFG registers */
   13734 	wuc = CSR_READ(sc, WMREG_WUC);
   13735 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13736 
   13737 	reg = mdicnfg & ~MDICNFG_DEST;
   13738 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13739 
   13740 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13741 		nvmword = INVM_DEFAULT_AL;
   13742 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13743 
   13744 	/* Get Power Management cap offset */
   13745 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13746 		&pmreg, NULL) == 0)
   13747 		return;
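	/*
	 * While the PHY reports an unconfigured PLL, reset the PHY and
	 * bounce the device through D3hot with the patched iNVM autoload
	 * word, which restarts the PLL.
	 */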
   13748 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13749 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13750 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13751 
   13752 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13753 			break; /* OK */
   13754 		}
   13755 
   13756 		wa_done = true;
   13757 		/* Directly reset the internal PHY */
   13758 		reg = CSR_READ(sc, WMREG_CTRL);
   13759 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13760 
   13761 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13762 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13763 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13764 
   13765 		CSR_WRITE(sc, WMREG_WUC, 0);
   13766 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13767 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13768 
   13769 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13770 		    pmreg + PCI_PMCSR);
   13771 		pcireg |= PCI_PMCSR_STATE_D3;
   13772 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13773 		    pmreg + PCI_PMCSR, pcireg);
   13774 		delay(1000);
   13775 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13776 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13777 		    pmreg + PCI_PMCSR, pcireg);
   13778 
   13779 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13780 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13781 
   13782 		/* Restore WUC register */
   13783 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13784 	}
   13785 
   13786 	/* Restore MDICNFG setting */
   13787 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13788 	if (wa_done)
   13789 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13790 }
   13791