/*	$NetBSD: if_wm.c,v 1.566 2018/03/01 03:32:33 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.566 2018/03/01 03:32:33 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
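
/*
 * Illustrative note (added comment, not from the original source): the
 * descriptor and job counts above are powers of two, so ring indices
 * wrap with a mask instead of a modulo.  For example, with
 * WM_NTXDESC(txq) == 4096, WM_NEXTTX(txq, 4095) expands to
 * ((4095 + 1) & 4095) == 0, wrapping back to the start of the ring.
 */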

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
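
/*
 * Worked example for the sizing comment above (added for clarity): a
 * 9018 byte jumbo frame spread over 2048 byte (MCLBYTES) clusters needs
 * howmany(9018, 2048) == 5 buffers, so the 256 entry ring holds
 * 256 / 5 == 51 such frames, i.e. roughly the 50 jumbo packets quoted.
 */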

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do{								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	}while(0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
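
/*
 * Illustrative expansion of the macros above (added comment, not
 * compiled code): for queue number 0, WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares
 *
 *	char txq_txdw_evcnt_name[...];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) formats the
 * counter name as "txq00txdw" and registers it with
 * evcnt_attach_dynamic().
 */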

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This queue mediates between them without
	 * blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_watchdog;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
	WM_Q_EVCNT_DEFINE(rxq, rxdefer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
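
/*
 * Note added for clarity: these wrappers tolerate a NULL sc_core_lock.
 * In that case WM_CORE_LOCK()/WM_CORE_UNLOCK() degrade to no-ops and
 * WM_CORE_LOCKED() evaluates true, so callers need no separate
 * locked/unlocked code paths.
 */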

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
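
/*
 * Sketch of the tail-pointer idiom above (added comment, illustrative
 * only): rxq_tailp always points at the m_next field to fill next, so
 * each append is O(1):
 *
 *	WM_RXCHAIN_RESET(rxq);     rxq_head = NULL, tailp = &rxq_head
 *	WM_RXCHAIN_LINK(rxq, m1);  rxq_head = m1,   tailp = &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);  m1->m_next = m2, tailp = &m2->m_next
 */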

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
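
/*
 * Added note: reading STATUS forces any posted PCI write to reach the
 * device before execution continues.  A typical (illustrative)
 * sequence is
 *
 *	CSR_WRITE(sc, reg, val);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(usec);
 *
 * when the hardware needs the write to land before the delay starts.
 */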

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
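
/*
 * Worked example (added for clarity): the descriptor base registers
 * take a 64-bit DMA address as two 32-bit halves, so a ring at bus
 * address 0x123456000 is programmed as LO == 0x23456000 and
 * HI == 0x1.  On a platform with a 32-bit bus_addr_t the high half is
 * always 0.
 */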

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *, uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *, uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1333 	  "82576 1000BaseX Ethernet",
   1334 	  WM_T_82576,		WMP_F_FIBER },
   1335 
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1337 	  "82576 gigabit Ethernet (SERDES)",
   1338 	  WM_T_82576,		WMP_F_SERDES },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1341 	  "82576 quad-1000BaseT Ethernet",
   1342 	  WM_T_82576,		WMP_F_COPPER },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1345 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1346 	  WM_T_82576,		WMP_F_COPPER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1349 	  "82576 gigabit Ethernet",
   1350 	  WM_T_82576,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1353 	  "82576 gigabit Ethernet (SERDES)",
   1354 	  WM_T_82576,		WMP_F_SERDES },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1356 	  "82576 quad-gigabit Ethernet (SERDES)",
   1357 	  WM_T_82576,		WMP_F_SERDES },
   1358 
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1360 	  "82580 1000BaseT Ethernet",
   1361 	  WM_T_82580,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1363 	  "82580 1000BaseX Ethernet",
   1364 	  WM_T_82580,		WMP_F_FIBER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1367 	  "82580 1000BaseT Ethernet (SERDES)",
   1368 	  WM_T_82580,		WMP_F_SERDES },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1371 	  "82580 gigabit Ethernet (SGMII)",
   1372 	  WM_T_82580,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1374 	  "82580 dual-1000BaseT Ethernet",
   1375 	  WM_T_82580,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1378 	  "82580 quad-1000BaseX Ethernet",
   1379 	  WM_T_82580,		WMP_F_FIBER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1382 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1383 	  WM_T_82580,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1386 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1387 	  WM_T_82580,		WMP_F_SERDES },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1390 	  "DH89XXCC 1000BASE-KX Ethernet",
   1391 	  WM_T_82580,		WMP_F_SERDES },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1394 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1395 	  WM_T_82580,		WMP_F_SERDES },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1398 	  "I350 Gigabit Network Connection",
   1399 	  WM_T_I350,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1402 	  "I350 Gigabit Fiber Network Connection",
   1403 	  WM_T_I350,		WMP_F_FIBER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1406 	  "I350 Gigabit Backplane Connection",
   1407 	  WM_T_I350,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1410 	  "I350 Quad Port Gigabit Ethernet",
   1411 	  WM_T_I350,		WMP_F_SERDES },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1414 	  "I350 Gigabit Connection",
   1415 	  WM_T_I350,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1418 	  "I354 Gigabit Ethernet (KX)",
   1419 	  WM_T_I354,		WMP_F_SERDES },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1422 	  "I354 Gigabit Ethernet (SGMII)",
   1423 	  WM_T_I354,		WMP_F_COPPER },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1426 	  "I354 Gigabit Ethernet (2.5G)",
   1427 	  WM_T_I354,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1430 	  "I210-T1 Ethernet Server Adapter",
   1431 	  WM_T_I210,		WMP_F_COPPER },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1434 	  "I210 Ethernet (Copper OEM)",
   1435 	  WM_T_I210,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1438 	  "I210 Ethernet (Copper IT)",
   1439 	  WM_T_I210,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1442 	  "I210 Ethernet (FLASH less)",
   1443 	  WM_T_I210,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1446 	  "I210 Gigabit Ethernet (Fiber)",
   1447 	  WM_T_I210,		WMP_F_FIBER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1450 	  "I210 Gigabit Ethernet (SERDES)",
   1451 	  WM_T_I210,		WMP_F_SERDES },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1454 	  "I210 Gigabit Ethernet (FLASH less)",
   1455 	  WM_T_I210,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1458 	  "I210 Gigabit Ethernet (SGMII)",
   1459 	  WM_T_I210,		WMP_F_COPPER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1462 	  "I211 Ethernet (COPPER)",
   1463 	  WM_T_I211,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1465 	  "I217 V Ethernet Connection",
   1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1468 	  "I217 LM Ethernet Connection",
   1469 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1471 	  "I218 V Ethernet Connection",
   1472 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1474 	  "I218 V Ethernet Connection",
   1475 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1477 	  "I218 V Ethernet Connection",
   1478 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1480 	  "I218 LM Ethernet Connection",
   1481 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1483 	  "I218 LM Ethernet Connection",
   1484 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1486 	  "I218 LM Ethernet Connection",
   1487 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1488 #if 0
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1490 	  "I219 V Ethernet Connection",
   1491 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1493 	  "I219 V Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1496 	  "I219 V Ethernet Connection",
   1497 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1499 	  "I219 V Ethernet Connection",
   1500 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1502 	  "I219 LM Ethernet Connection",
   1503 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1505 	  "I219 LM Ethernet Connection",
   1506 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1508 	  "I219 LM Ethernet Connection",
   1509 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1511 	  "I219 LM Ethernet Connection",
   1512 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1514 	  "I219 LM Ethernet Connection",
   1515 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1516 #endif
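	/* Sentinel: wm_lookup() stops at the entry with a NULL name. */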
   1517 	{ 0,			0,
   1518 	  NULL,
   1519 	  0,			0 },
   1520 };
   1521 
    1522 /*
    1523  * Register read/write functions, other than CSR_READ() and
    1524  * CSR_WRITE().
    1525  */
   1526 
   1527 #if 0 /* Not currently used */
   1528 static inline uint32_t
   1529 wm_io_read(struct wm_softc *sc, int reg)
   1530 {
   1531 
   1532 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1533 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1534 }
   1535 #endif
   1536 
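/*
 * Indirect I/O-mapped register access: write the register offset to
 * IOADDR (I/O offset 0), then transfer the value through IODATA
 * (I/O offset 4).
 */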
   1537 static inline void
   1538 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1539 {
   1540 
   1541 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1542 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1543 }
   1544 
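/*
 * Write one byte of data to an indexed 8-bit controller register and
 * poll until the hardware reports SCTL_CTL_READY, warning on timeout.
 */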
   1545 static inline void
   1546 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1547     uint32_t data)
   1548 {
   1549 	uint32_t regval;
   1550 	int i;
   1551 
   1552 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1553 
   1554 	CSR_WRITE(sc, reg, regval);
   1555 
   1556 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1557 		delay(5);
   1558 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1559 			break;
   1560 	}
   1561 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1562 		aprint_error("%s: WARNING:"
   1563 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1564 		    device_xname(sc->sc_dev), reg);
   1565 	}
   1566 }
   1567 
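/*
 * Store a DMA address in the low/high 32-bit halves of a descriptor
 * address field, in little-endian byte order.
 */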
   1568 static inline void
   1569 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1570 {
   1571 	wa->wa_low = htole32(v & 0xffffffffU);
   1572 	if (sizeof(bus_addr_t) == 8)
   1573 		wa->wa_high = htole32((uint64_t) v >> 32);
   1574 	else
   1575 		wa->wa_high = 0;
   1576 }
   1577 
   1578 /*
   1579  * Descriptor sync/init functions.
   1580  */
   1581 static inline void
   1582 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1583 {
   1584 	struct wm_softc *sc = txq->txq_sc;
   1585 
   1586 	/* If it will wrap around, sync to the end of the ring. */
   1587 	if ((start + num) > WM_NTXDESC(txq)) {
   1588 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1589 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1590 		    (WM_NTXDESC(txq) - start), ops);
   1591 		num -= (WM_NTXDESC(txq) - start);
   1592 		start = 0;
   1593 	}
   1594 
   1595 	/* Now sync whatever is left. */
   1596 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1597 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1598 }
   1599 
   1600 static inline void
   1601 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1602 {
   1603 	struct wm_softc *sc = rxq->rxq_sc;
   1604 
   1605 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1606 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1607 }
   1608 
   1609 static inline void
   1610 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1611 {
   1612 	struct wm_softc *sc = rxq->rxq_sc;
   1613 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1614 	struct mbuf *m = rxs->rxs_mbuf;
   1615 
   1616 	/*
   1617 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1618 	 * so that the payload after the Ethernet header is aligned
   1619 	 * to a 4-byte boundary.
    1620 	 *
   1621 	 * XXX BRAINDAMAGE ALERT!
   1622 	 * The stupid chip uses the same size for every buffer, which
   1623 	 * is set in the Receive Control register.  We are using the 2K
   1624 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1625 	 * reason, we can't "scoot" packets longer than the standard
   1626 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1627 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1628 	 * the upper layer copy the headers.
   1629 	 */
   1630 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1631 
   1632 	if (sc->sc_type == WM_T_82574) {
   1633 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1634 		rxd->erx_data.erxd_addr =
   1635 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1636 		rxd->erx_data.erxd_dd = 0;
   1637 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1638 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1639 
   1640 		rxd->nqrx_data.nrxd_paddr =
   1641 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1642 		/* Currently, split header is not supported. */
   1643 		rxd->nqrx_data.nrxd_haddr = 0;
   1644 	} else {
   1645 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1646 
   1647 		wm_set_dma_addr(&rxd->wrx_addr,
   1648 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1649 		rxd->wrx_len = 0;
   1650 		rxd->wrx_cksum = 0;
   1651 		rxd->wrx_status = 0;
   1652 		rxd->wrx_errors = 0;
   1653 		rxd->wrx_special = 0;
   1654 	}
   1655 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1656 
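	/* Move the tail pointer so the hardware may reuse this descriptor. */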
   1657 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1658 }
   1659 
   1660 /*
   1661  * Device driver interface functions and commonly used functions.
   1662  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1663  */
   1664 
   1665 /* Lookup supported device table */
   1666 static const struct wm_product *
   1667 wm_lookup(const struct pci_attach_args *pa)
   1668 {
   1669 	const struct wm_product *wmp;
   1670 
   1671 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1672 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1673 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1674 			return wmp;
   1675 	}
   1676 	return NULL;
   1677 }
   1678 
   1679 /* The match function (ca_match) */
   1680 static int
   1681 wm_match(device_t parent, cfdata_t cf, void *aux)
   1682 {
   1683 	struct pci_attach_args *pa = aux;
   1684 
   1685 	if (wm_lookup(pa) != NULL)
   1686 		return 1;
   1687 
   1688 	return 0;
   1689 }
   1690 
   1691 /* The attach function (ca_attach) */
   1692 static void
   1693 wm_attach(device_t parent, device_t self, void *aux)
   1694 {
   1695 	struct wm_softc *sc = device_private(self);
   1696 	struct pci_attach_args *pa = aux;
   1697 	prop_dictionary_t dict;
   1698 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1699 	pci_chipset_tag_t pc = pa->pa_pc;
   1700 	int counts[PCI_INTR_TYPE_SIZE];
   1701 	pci_intr_type_t max_type;
   1702 	const char *eetype, *xname;
   1703 	bus_space_tag_t memt;
   1704 	bus_space_handle_t memh;
   1705 	bus_size_t memsize;
   1706 	int memh_valid;
   1707 	int i, error;
   1708 	const struct wm_product *wmp;
   1709 	prop_data_t ea;
   1710 	prop_number_t pn;
   1711 	uint8_t enaddr[ETHER_ADDR_LEN];
   1712 	char buf[256];
   1713 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1714 	pcireg_t preg, memtype;
   1715 	uint16_t eeprom_data, apme_mask;
   1716 	bool force_clear_smbi;
   1717 	uint32_t link_mode;
   1718 	uint32_t reg;
   1719 
   1720 	sc->sc_dev = self;
   1721 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1722 	sc->sc_core_stopping = false;
   1723 
   1724 	wmp = wm_lookup(pa);
   1725 #ifdef DIAGNOSTIC
   1726 	if (wmp == NULL) {
   1727 		printf("\n");
   1728 		panic("wm_attach: impossible");
   1729 	}
   1730 #endif
   1731 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1732 
   1733 	sc->sc_pc = pa->pa_pc;
   1734 	sc->sc_pcitag = pa->pa_tag;
   1735 
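	/* Prefer the 64-bit DMA tag when the bus supports it. */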
   1736 	if (pci_dma64_available(pa))
   1737 		sc->sc_dmat = pa->pa_dmat64;
   1738 	else
   1739 		sc->sc_dmat = pa->pa_dmat;
   1740 
   1741 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1742 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1743 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1744 
   1745 	sc->sc_type = wmp->wmp_type;
   1746 
   1747 	/* Set default function pointers */
   1748 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1749 	sc->phy.release = sc->nvm.release = wm_put_null;
   1750 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1751 
   1752 	if (sc->sc_type < WM_T_82543) {
   1753 		if (sc->sc_rev < 2) {
   1754 			aprint_error_dev(sc->sc_dev,
   1755 			    "i82542 must be at least rev. 2\n");
   1756 			return;
   1757 		}
   1758 		if (sc->sc_rev < 3)
   1759 			sc->sc_type = WM_T_82542_2_0;
   1760 	}
   1761 
   1762 	/*
   1763 	 * Disable MSI for Errata:
   1764 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1765 	 *
   1766 	 *  82544: Errata 25
   1767 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1768 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1769 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1770 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1771 	 *
   1772 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1773 	 *
   1774 	 *  82571 & 82572: Errata 63
   1775 	 */
   1776 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1777 	    || (sc->sc_type == WM_T_82572))
   1778 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1779 
   1780 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1781 	    || (sc->sc_type == WM_T_82580)
   1782 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1783 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1784 		sc->sc_flags |= WM_F_NEWQUEUE;
   1785 
   1786 	/* Set device properties (mactype) */
   1787 	dict = device_properties(sc->sc_dev);
   1788 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1789 
   1790 	/*
    1791 	 * Map the device.  All devices support memory-mapped access,
   1792 	 * and it is really required for normal operation.
   1793 	 */
   1794 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1795 	switch (memtype) {
   1796 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1797 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1798 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1799 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1800 		break;
   1801 	default:
   1802 		memh_valid = 0;
   1803 		break;
   1804 	}
   1805 
   1806 	if (memh_valid) {
   1807 		sc->sc_st = memt;
   1808 		sc->sc_sh = memh;
   1809 		sc->sc_ss = memsize;
   1810 	} else {
   1811 		aprint_error_dev(sc->sc_dev,
   1812 		    "unable to map device registers\n");
   1813 		return;
   1814 	}
   1815 
   1816 	/*
   1817 	 * In addition, i82544 and later support I/O mapped indirect
   1818 	 * register access.  It is not desirable (nor supported in
   1819 	 * this driver) to use it for normal operation, though it is
   1820 	 * required to work around bugs in some chip versions.
   1821 	 */
   1822 	if (sc->sc_type >= WM_T_82544) {
   1823 		/* First we have to find the I/O BAR. */
   1824 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1825 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1826 			if (memtype == PCI_MAPREG_TYPE_IO)
   1827 				break;
   1828 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1829 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1830 				i += 4;	/* skip high bits, too */
   1831 		}
   1832 		if (i < PCI_MAPREG_END) {
   1833 			/*
    1834 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1835 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1836 			 * That's no problem, because newer chips don't have
    1837 			 * this bug.
    1838 			 *
    1839 			 * The i8254x apparently doesn't respond when the
    1840 			 * I/O BAR is 0, which looks as if it hasn't been
    1841 			 * configured.
   1842 			 */
   1843 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1844 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1845 				aprint_error_dev(sc->sc_dev,
   1846 				    "WARNING: I/O BAR at zero.\n");
   1847 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1848 					0, &sc->sc_iot, &sc->sc_ioh,
   1849 					NULL, &sc->sc_ios) == 0) {
   1850 				sc->sc_flags |= WM_F_IOH_VALID;
   1851 			} else {
   1852 				aprint_error_dev(sc->sc_dev,
   1853 				    "WARNING: unable to map I/O space\n");
   1854 			}
   1855 		}
   1856 
   1857 	}
   1858 
   1859 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1860 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1861 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1862 	if (sc->sc_type < WM_T_82542_2_1)
   1863 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1864 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1865 
   1866 	/* power up chip */
   1867 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1868 	    NULL)) && error != EOPNOTSUPP) {
   1869 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1870 		return;
   1871 	}
   1872 
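	/* Scale the number of queues to the available MSI-X vectors. */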
   1873 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1874 	/*
    1875 	 * To save interrupt resources, don't use MSI-X if we can
    1876 	 * use only one queue.
   1877 	 */
   1878 	if (sc->sc_nqueues > 1) {
   1879 		max_type = PCI_INTR_TYPE_MSIX;
   1880 		/*
    1881 		 * The 82583 has an MSI-X capability in its PCI
    1882 		 * configuration space but doesn't actually support it;
    1883 		 * at least the documentation says nothing about MSI-X.
   1884 		 */
   1885 		counts[PCI_INTR_TYPE_MSIX]
   1886 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1887 	} else {
   1888 		max_type = PCI_INTR_TYPE_MSI;
   1889 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1890 	}
   1891 
   1892 	/* Allocation settings */
   1893 	counts[PCI_INTR_TYPE_MSI] = 1;
   1894 	counts[PCI_INTR_TYPE_INTX] = 1;
    1895 	/* Overridden by the wm_disable_msi/wm_disable_msix flags */
   1896 	if (wm_disable_msi != 0) {
   1897 		counts[PCI_INTR_TYPE_MSI] = 0;
   1898 		if (wm_disable_msix != 0) {
   1899 			max_type = PCI_INTR_TYPE_INTX;
   1900 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1901 		}
   1902 	} else if (wm_disable_msix != 0) {
   1903 		max_type = PCI_INTR_TYPE_MSI;
   1904 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1905 	}
   1906 
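	/*
	 * Fall back MSI-X -> MSI -> INTx: on a setup failure, release
	 * the allocated vectors and retry with the next interrupt type.
	 */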
   1907 alloc_retry:
   1908 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1909 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1910 		return;
   1911 	}
   1912 
   1913 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1914 		error = wm_setup_msix(sc);
   1915 		if (error) {
   1916 			pci_intr_release(pc, sc->sc_intrs,
   1917 			    counts[PCI_INTR_TYPE_MSIX]);
   1918 
   1919 			/* Setup for MSI: Disable MSI-X */
   1920 			max_type = PCI_INTR_TYPE_MSI;
   1921 			counts[PCI_INTR_TYPE_MSI] = 1;
   1922 			counts[PCI_INTR_TYPE_INTX] = 1;
   1923 			goto alloc_retry;
   1924 		}
    1925 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1926 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1927 		error = wm_setup_legacy(sc);
   1928 		if (error) {
   1929 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1930 			    counts[PCI_INTR_TYPE_MSI]);
   1931 
   1932 			/* The next try is for INTx: Disable MSI */
   1933 			max_type = PCI_INTR_TYPE_INTX;
   1934 			counts[PCI_INTR_TYPE_INTX] = 1;
   1935 			goto alloc_retry;
   1936 		}
   1937 	} else {
   1938 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1939 		error = wm_setup_legacy(sc);
   1940 		if (error) {
   1941 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1942 			    counts[PCI_INTR_TYPE_INTX]);
   1943 			return;
   1944 		}
   1945 	}
   1946 
   1947 	/*
   1948 	 * Check the function ID (unit number of the chip).
   1949 	 */
   1950 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1951 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1952 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1953 	    || (sc->sc_type == WM_T_82580)
   1954 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1955 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1956 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1957 	else
   1958 		sc->sc_funcid = 0;
   1959 
   1960 	/*
   1961 	 * Determine a few things about the bus we're connected to.
   1962 	 */
   1963 	if (sc->sc_type < WM_T_82543) {
   1964 		/* We don't really know the bus characteristics here. */
   1965 		sc->sc_bus_speed = 33;
   1966 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1967 		/*
    1968 		 * CSA (Communication Streaming Architecture) is about as
    1969 		 * fast as a 32-bit 66MHz PCI bus.
   1970 		 */
   1971 		sc->sc_flags |= WM_F_CSA;
   1972 		sc->sc_bus_speed = 66;
   1973 		aprint_verbose_dev(sc->sc_dev,
   1974 		    "Communication Streaming Architecture\n");
   1975 		if (sc->sc_type == WM_T_82547) {
   1976 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1977 			callout_setfunc(&sc->sc_txfifo_ch,
   1978 					wm_82547_txfifo_stall, sc);
   1979 			aprint_verbose_dev(sc->sc_dev,
   1980 			    "using 82547 Tx FIFO stall work-around\n");
   1981 		}
   1982 	} else if (sc->sc_type >= WM_T_82571) {
   1983 		sc->sc_flags |= WM_F_PCIE;
   1984 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1985 		    && (sc->sc_type != WM_T_ICH10)
   1986 		    && (sc->sc_type != WM_T_PCH)
   1987 		    && (sc->sc_type != WM_T_PCH2)
   1988 		    && (sc->sc_type != WM_T_PCH_LPT)
   1989 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1990 			/* ICH* and PCH* have no PCIe capability registers */
   1991 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1992 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1993 				NULL) == 0)
   1994 				aprint_error_dev(sc->sc_dev,
   1995 				    "unable to find PCIe capability\n");
   1996 		}
   1997 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1998 	} else {
   1999 		reg = CSR_READ(sc, WMREG_STATUS);
   2000 		if (reg & STATUS_BUS64)
   2001 			sc->sc_flags |= WM_F_BUS64;
   2002 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2003 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2004 
   2005 			sc->sc_flags |= WM_F_PCIX;
   2006 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2007 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2008 				aprint_error_dev(sc->sc_dev,
   2009 				    "unable to find PCIX capability\n");
   2010 			else if (sc->sc_type != WM_T_82545_3 &&
   2011 				 sc->sc_type != WM_T_82546_3) {
   2012 				/*
   2013 				 * Work around a problem caused by the BIOS
   2014 				 * setting the max memory read byte count
   2015 				 * incorrectly.
   2016 				 */
   2017 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2018 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2019 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2020 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2021 
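				/* Both fields encode the size as 512 << n. */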
   2022 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2023 				    PCIX_CMD_BYTECNT_SHIFT;
   2024 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2025 				    PCIX_STATUS_MAXB_SHIFT;
   2026 				if (bytecnt > maxb) {
   2027 					aprint_verbose_dev(sc->sc_dev,
   2028 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2029 					    512 << bytecnt, 512 << maxb);
   2030 					pcix_cmd = (pcix_cmd &
   2031 					    ~PCIX_CMD_BYTECNT_MASK) |
   2032 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2033 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2034 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2035 					    pcix_cmd);
   2036 				}
   2037 			}
   2038 		}
   2039 		/*
   2040 		 * The quad port adapter is special; it has a PCIX-PCIX
   2041 		 * bridge on the board, and can run the secondary bus at
   2042 		 * a higher speed.
   2043 		 */
   2044 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2045 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2046 								      : 66;
   2047 		} else if (sc->sc_flags & WM_F_PCIX) {
   2048 			switch (reg & STATUS_PCIXSPD_MASK) {
   2049 			case STATUS_PCIXSPD_50_66:
   2050 				sc->sc_bus_speed = 66;
   2051 				break;
   2052 			case STATUS_PCIXSPD_66_100:
   2053 				sc->sc_bus_speed = 100;
   2054 				break;
   2055 			case STATUS_PCIXSPD_100_133:
   2056 				sc->sc_bus_speed = 133;
   2057 				break;
   2058 			default:
   2059 				aprint_error_dev(sc->sc_dev,
   2060 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2061 				    reg & STATUS_PCIXSPD_MASK);
   2062 				sc->sc_bus_speed = 66;
   2063 				break;
   2064 			}
   2065 		} else
   2066 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2067 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2068 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2069 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2070 	}
   2071 
    2072 	/* Disable ASPM L0s and/or L1 as a workaround */
   2073 	wm_disable_aspm(sc);
   2074 
   2075 	/* clear interesting stat counters */
   2076 	CSR_READ(sc, WMREG_COLC);
   2077 	CSR_READ(sc, WMREG_RXERRC);
   2078 
   2079 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2080 	    || (sc->sc_type >= WM_T_ICH8))
   2081 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2082 	if (sc->sc_type >= WM_T_ICH8)
   2083 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2084 
    2085 	/* Set up NVM access functions/sizes and PHY/NVM lock functions */
   2086 	switch (sc->sc_type) {
   2087 	case WM_T_82542_2_0:
   2088 	case WM_T_82542_2_1:
   2089 	case WM_T_82543:
   2090 	case WM_T_82544:
   2091 		/* Microwire */
   2092 		sc->nvm.read = wm_nvm_read_uwire;
   2093 		sc->sc_nvm_wordsize = 64;
   2094 		sc->sc_nvm_addrbits = 6;
   2095 		break;
   2096 	case WM_T_82540:
   2097 	case WM_T_82545:
   2098 	case WM_T_82545_3:
   2099 	case WM_T_82546:
   2100 	case WM_T_82546_3:
   2101 		/* Microwire */
   2102 		sc->nvm.read = wm_nvm_read_uwire;
   2103 		reg = CSR_READ(sc, WMREG_EECD);
   2104 		if (reg & EECD_EE_SIZE) {
   2105 			sc->sc_nvm_wordsize = 256;
   2106 			sc->sc_nvm_addrbits = 8;
   2107 		} else {
   2108 			sc->sc_nvm_wordsize = 64;
   2109 			sc->sc_nvm_addrbits = 6;
   2110 		}
   2111 		sc->sc_flags |= WM_F_LOCK_EECD;
   2112 		sc->nvm.acquire = wm_get_eecd;
   2113 		sc->nvm.release = wm_put_eecd;
   2114 		break;
   2115 	case WM_T_82541:
   2116 	case WM_T_82541_2:
   2117 	case WM_T_82547:
   2118 	case WM_T_82547_2:
   2119 		reg = CSR_READ(sc, WMREG_EECD);
   2120 		/*
    2121 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2122 		 * 8254[17], so set the flags and functions before calling it.
   2123 		 */
   2124 		sc->sc_flags |= WM_F_LOCK_EECD;
   2125 		sc->nvm.acquire = wm_get_eecd;
   2126 		sc->nvm.release = wm_put_eecd;
   2127 		if (reg & EECD_EE_TYPE) {
   2128 			/* SPI */
   2129 			sc->nvm.read = wm_nvm_read_spi;
   2130 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2131 			wm_nvm_set_addrbits_size_eecd(sc);
   2132 		} else {
   2133 			/* Microwire */
   2134 			sc->nvm.read = wm_nvm_read_uwire;
   2135 			if ((reg & EECD_EE_ABITS) != 0) {
   2136 				sc->sc_nvm_wordsize = 256;
   2137 				sc->sc_nvm_addrbits = 8;
   2138 			} else {
   2139 				sc->sc_nvm_wordsize = 64;
   2140 				sc->sc_nvm_addrbits = 6;
   2141 			}
   2142 		}
   2143 		break;
   2144 	case WM_T_82571:
   2145 	case WM_T_82572:
   2146 		/* SPI */
   2147 		sc->nvm.read = wm_nvm_read_eerd;
    2148 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2149 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2150 		wm_nvm_set_addrbits_size_eecd(sc);
   2151 		sc->phy.acquire = wm_get_swsm_semaphore;
   2152 		sc->phy.release = wm_put_swsm_semaphore;
   2153 		sc->nvm.acquire = wm_get_nvm_82571;
   2154 		sc->nvm.release = wm_put_nvm_82571;
   2155 		break;
   2156 	case WM_T_82573:
   2157 	case WM_T_82574:
   2158 	case WM_T_82583:
   2159 		sc->nvm.read = wm_nvm_read_eerd;
    2160 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2161 		if (sc->sc_type == WM_T_82573) {
   2162 			sc->phy.acquire = wm_get_swsm_semaphore;
   2163 			sc->phy.release = wm_put_swsm_semaphore;
   2164 			sc->nvm.acquire = wm_get_nvm_82571;
   2165 			sc->nvm.release = wm_put_nvm_82571;
   2166 		} else {
   2167 			/* Both PHY and NVM use the same semaphore. */
   2168 			sc->phy.acquire = sc->nvm.acquire
   2169 			    = wm_get_swfwhw_semaphore;
   2170 			sc->phy.release = sc->nvm.release
   2171 			    = wm_put_swfwhw_semaphore;
   2172 		}
   2173 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2174 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2175 			sc->sc_nvm_wordsize = 2048;
   2176 		} else {
   2177 			/* SPI */
   2178 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2179 			wm_nvm_set_addrbits_size_eecd(sc);
   2180 		}
   2181 		break;
   2182 	case WM_T_82575:
   2183 	case WM_T_82576:
   2184 	case WM_T_82580:
   2185 	case WM_T_I350:
   2186 	case WM_T_I354:
   2187 	case WM_T_80003:
   2188 		/* SPI */
   2189 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2190 		wm_nvm_set_addrbits_size_eecd(sc);
    2191 		if ((sc->sc_type == WM_T_80003)
   2192 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2193 			sc->nvm.read = wm_nvm_read_eerd;
   2194 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2195 		} else {
   2196 			sc->nvm.read = wm_nvm_read_spi;
   2197 			sc->sc_flags |= WM_F_LOCK_EECD;
   2198 		}
   2199 		sc->phy.acquire = wm_get_phy_82575;
   2200 		sc->phy.release = wm_put_phy_82575;
   2201 		sc->nvm.acquire = wm_get_nvm_80003;
   2202 		sc->nvm.release = wm_put_nvm_80003;
   2203 		break;
   2204 	case WM_T_ICH8:
   2205 	case WM_T_ICH9:
   2206 	case WM_T_ICH10:
   2207 	case WM_T_PCH:
   2208 	case WM_T_PCH2:
   2209 	case WM_T_PCH_LPT:
   2210 		sc->nvm.read = wm_nvm_read_ich8;
   2211 		/* FLASH */
   2212 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2213 		sc->sc_nvm_wordsize = 2048;
   2214 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2215 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2216 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2217 			aprint_error_dev(sc->sc_dev,
   2218 			    "can't map FLASH registers\n");
   2219 			goto out;
   2220 		}
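		/*
		 * GFPREG apparently holds the flash region's first and
		 * last 4KB sectors; the region contains two NVM banks,
		 * so halve it and convert bytes to 16-bit words per bank.
		 */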
   2221 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2222 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2223 		    ICH_FLASH_SECTOR_SIZE;
   2224 		sc->sc_ich8_flash_bank_size =
   2225 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2226 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2227 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2228 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2229 		sc->sc_flashreg_offset = 0;
   2230 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2231 		sc->phy.release = wm_put_swflag_ich8lan;
   2232 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2233 		sc->nvm.release = wm_put_nvm_ich8lan;
   2234 		break;
   2235 	case WM_T_PCH_SPT:
   2236 		sc->nvm.read = wm_nvm_read_spt;
   2237 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2238 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2239 		sc->sc_flasht = sc->sc_st;
   2240 		sc->sc_flashh = sc->sc_sh;
   2241 		sc->sc_ich8_flash_base = 0;
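		/*
		 * Bits 5:1 of the STRAP register appear to encode the
		 * flash size in units of NVM_SIZE_MULTIPLIER bytes.
		 */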
   2242 		sc->sc_nvm_wordsize =
   2243 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2244 			* NVM_SIZE_MULTIPLIER;
    2245 		/* That is the size in bytes; we want it in words */
   2246 		sc->sc_nvm_wordsize /= 2;
   2247 		/* assume 2 banks */
   2248 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2249 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2250 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2251 		sc->phy.release = wm_put_swflag_ich8lan;
   2252 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2253 		sc->nvm.release = wm_put_nvm_ich8lan;
   2254 		break;
   2255 	case WM_T_I210:
   2256 	case WM_T_I211:
    2257 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2258 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2259 		if (wm_nvm_flash_presence_i210(sc)) {
   2260 			sc->nvm.read = wm_nvm_read_eerd;
   2261 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2262 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2263 			wm_nvm_set_addrbits_size_eecd(sc);
   2264 		} else {
   2265 			sc->nvm.read = wm_nvm_read_invm;
   2266 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2267 			sc->sc_nvm_wordsize = INVM_SIZE;
   2268 		}
   2269 		sc->phy.acquire = wm_get_phy_82575;
   2270 		sc->phy.release = wm_put_phy_82575;
   2271 		sc->nvm.acquire = wm_get_nvm_80003;
   2272 		sc->nvm.release = wm_put_nvm_80003;
   2273 		break;
   2274 	default:
   2275 		break;
   2276 	}
   2277 
    2278 	/* Ensure the SMBI bit is clear before the first NVM or PHY access */
   2279 	switch (sc->sc_type) {
   2280 	case WM_T_82571:
   2281 	case WM_T_82572:
   2282 		reg = CSR_READ(sc, WMREG_SWSM2);
   2283 		if ((reg & SWSM2_LOCK) == 0) {
   2284 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2285 			force_clear_smbi = true;
   2286 		} else
   2287 			force_clear_smbi = false;
   2288 		break;
   2289 	case WM_T_82573:
   2290 	case WM_T_82574:
   2291 	case WM_T_82583:
   2292 		force_clear_smbi = true;
   2293 		break;
   2294 	default:
   2295 		force_clear_smbi = false;
   2296 		break;
   2297 	}
   2298 	if (force_clear_smbi) {
   2299 		reg = CSR_READ(sc, WMREG_SWSM);
   2300 		if ((reg & SWSM_SMBI) != 0)
   2301 			aprint_error_dev(sc->sc_dev,
   2302 			    "Please update the Bootagent\n");
   2303 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2304 	}
   2305 
   2306 	/*
    2307 	 * Defer printing the EEPROM type until after verifying the checksum.
   2308 	 * This allows the EEPROM type to be printed correctly in the case
   2309 	 * that no EEPROM is attached.
   2310 	 */
   2311 	/*
   2312 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2313 	 * this for later, so we can fail future reads from the EEPROM.
   2314 	 */
   2315 	if (wm_nvm_validate_checksum(sc)) {
   2316 		/*
    2317 		 * Try again, because some PCIe parts fail the first
    2318 		 * check due to the link being in a sleep state.
   2319 		 */
   2320 		if (wm_nvm_validate_checksum(sc))
   2321 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2322 	}
   2323 
   2324 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2325 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2326 	else {
   2327 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2328 		    sc->sc_nvm_wordsize);
   2329 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2330 			aprint_verbose("iNVM");
   2331 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2332 			aprint_verbose("FLASH(HW)");
   2333 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2334 			aprint_verbose("FLASH");
   2335 		else {
   2336 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2337 				eetype = "SPI";
   2338 			else
   2339 				eetype = "MicroWire";
   2340 			aprint_verbose("(%d address bits) %s EEPROM",
   2341 			    sc->sc_nvm_addrbits, eetype);
   2342 		}
   2343 	}
   2344 	wm_nvm_version(sc);
   2345 	aprint_verbose("\n");
   2346 
   2347 	/*
    2348 	 * XXX This is the first call to wm_gmii_setup_phytype(); the
    2349 	 * result might be incorrect.
   2350 	 */
   2351 	wm_gmii_setup_phytype(sc, 0, 0);
   2352 
   2353 	/* Reset the chip to a known state. */
   2354 	wm_reset(sc);
   2355 
   2356 	/*
   2357 	 * Check for I21[01] PLL workaround.
   2358 	 *
   2359 	 * Three cases:
   2360 	 * a) Chip is I211.
   2361 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2362 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2363 	 */
   2364 	if (sc->sc_type == WM_T_I211)
   2365 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2366 	if (sc->sc_type == WM_T_I210) {
   2367 		if (!wm_nvm_flash_presence_i210(sc))
   2368 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2369 		else if ((sc->sc_nvm_ver_major < 3)
   2370 		    || ((sc->sc_nvm_ver_major == 3)
   2371 			&& (sc->sc_nvm_ver_minor < 25))) {
   2372 			aprint_verbose_dev(sc->sc_dev,
   2373 			    "ROM image version %d.%d is older than 3.25\n",
   2374 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2375 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2376 		}
   2377 	}
   2378 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2379 		wm_pll_workaround_i210(sc);
   2380 
   2381 	wm_get_wakeup(sc);
   2382 
   2383 	/* Non-AMT based hardware can now take control from firmware */
   2384 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2385 		wm_get_hw_control(sc);
   2386 
   2387 	/*
   2388 	 * Read the Ethernet address from the EEPROM, if not first found
   2389 	 * in device properties.
   2390 	 */
   2391 	ea = prop_dictionary_get(dict, "mac-address");
   2392 	if (ea != NULL) {
   2393 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2394 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2395 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2396 	} else {
   2397 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2398 			aprint_error_dev(sc->sc_dev,
   2399 			    "unable to read Ethernet address\n");
   2400 			goto out;
   2401 		}
   2402 	}
   2403 
   2404 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2405 	    ether_sprintf(enaddr));
   2406 
   2407 	/*
   2408 	 * Read the config info from the EEPROM, and set up various
   2409 	 * bits in the control registers based on their contents.
   2410 	 */
   2411 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2412 	if (pn != NULL) {
   2413 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2414 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2415 	} else {
   2416 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2417 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2418 			goto out;
   2419 		}
   2420 	}
   2421 
   2422 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2423 	if (pn != NULL) {
   2424 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2425 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2426 	} else {
   2427 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2428 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2429 			goto out;
   2430 		}
   2431 	}
   2432 
   2433 	/* check for WM_F_WOL */
   2434 	switch (sc->sc_type) {
   2435 	case WM_T_82542_2_0:
   2436 	case WM_T_82542_2_1:
   2437 	case WM_T_82543:
   2438 		/* dummy? */
   2439 		eeprom_data = 0;
   2440 		apme_mask = NVM_CFG3_APME;
   2441 		break;
   2442 	case WM_T_82544:
   2443 		apme_mask = NVM_CFG2_82544_APM_EN;
   2444 		eeprom_data = cfg2;
   2445 		break;
   2446 	case WM_T_82546:
   2447 	case WM_T_82546_3:
   2448 	case WM_T_82571:
   2449 	case WM_T_82572:
   2450 	case WM_T_82573:
   2451 	case WM_T_82574:
   2452 	case WM_T_82583:
   2453 	case WM_T_80003:
   2454 	default:
   2455 		apme_mask = NVM_CFG3_APME;
   2456 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2457 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2458 		break;
   2459 	case WM_T_82575:
   2460 	case WM_T_82576:
   2461 	case WM_T_82580:
   2462 	case WM_T_I350:
   2463 	case WM_T_I354: /* XXX ok? */
   2464 	case WM_T_ICH8:
   2465 	case WM_T_ICH9:
   2466 	case WM_T_ICH10:
   2467 	case WM_T_PCH:
   2468 	case WM_T_PCH2:
   2469 	case WM_T_PCH_LPT:
   2470 	case WM_T_PCH_SPT:
   2471 		/* XXX The funcid should be checked on some devices */
   2472 		apme_mask = WUC_APME;
   2473 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2474 		break;
   2475 	}
   2476 
    2477 	/* Set the WM_F_WOL flag now that the EEPROM/WUC data is in hand */
   2478 	if ((eeprom_data & apme_mask) != 0)
   2479 		sc->sc_flags |= WM_F_WOL;
   2480 
   2481 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2482 		/* Check NVM for autonegotiation */
   2483 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2484 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2485 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2486 		}
   2487 	}
   2488 
   2489 	/*
    2490 	 * XXX Some multi-port cards need special handling to disable
    2491 	 * a particular port.
   2492 	 */
   2493 
   2494 	if (sc->sc_type >= WM_T_82544) {
   2495 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2496 		if (pn != NULL) {
   2497 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2498 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2499 		} else {
   2500 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2501 				aprint_error_dev(sc->sc_dev,
   2502 				    "unable to read SWDPIN\n");
   2503 				goto out;
   2504 			}
   2505 		}
   2506 	}
   2507 
   2508 	if (cfg1 & NVM_CFG1_ILOS)
   2509 		sc->sc_ctrl |= CTRL_ILOS;
   2510 
   2511 	/*
   2512 	 * XXX
    2513 	 * This code isn't correct because pins 2 and 3 are located
    2514 	 * at different positions on newer chips. Check all the datasheets.
    2515 	 *
    2516 	 * Until this is resolved, apply it only to the 82580 and older.
   2517 	 */
   2518 	if (sc->sc_type <= WM_T_82580) {
   2519 		if (sc->sc_type >= WM_T_82544) {
   2520 			sc->sc_ctrl |=
   2521 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2522 			    CTRL_SWDPIO_SHIFT;
   2523 			sc->sc_ctrl |=
   2524 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2525 			    CTRL_SWDPINS_SHIFT;
   2526 		} else {
   2527 			sc->sc_ctrl |=
   2528 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2529 			    CTRL_SWDPIO_SHIFT;
   2530 		}
   2531 	}
   2532 
    2533 	/* XXX Should this also be done for chips other than the 82580? */
   2534 	if (sc->sc_type == WM_T_82580) {
   2535 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2536 		if (nvmword & __BIT(13))
   2537 			sc->sc_ctrl |= CTRL_ILOS;
   2538 	}
   2539 
   2540 #if 0
   2541 	if (sc->sc_type >= WM_T_82544) {
   2542 		if (cfg1 & NVM_CFG1_IPS0)
   2543 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2544 		if (cfg1 & NVM_CFG1_IPS1)
   2545 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2546 		sc->sc_ctrl_ext |=
   2547 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2548 		    CTRL_EXT_SWDPIO_SHIFT;
   2549 		sc->sc_ctrl_ext |=
   2550 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2551 		    CTRL_EXT_SWDPINS_SHIFT;
   2552 	} else {
   2553 		sc->sc_ctrl_ext |=
   2554 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2555 		    CTRL_EXT_SWDPIO_SHIFT;
   2556 	}
   2557 #endif
   2558 
   2559 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2560 #if 0
   2561 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2562 #endif
   2563 
   2564 	if (sc->sc_type == WM_T_PCH) {
   2565 		uint16_t val;
   2566 
   2567 		/* Save the NVM K1 bit setting */
   2568 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2569 
   2570 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2571 			sc->sc_nvm_k1_enabled = 1;
   2572 		else
   2573 			sc->sc_nvm_k1_enabled = 0;
   2574 	}
   2575 
    2576 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2577 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2578 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2579 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2580 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2581 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2582 		/* Copper only */
   2583 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2584 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2585 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2586 	    || (sc->sc_type == WM_T_I211)) {
   2587 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2588 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2589 		switch (link_mode) {
   2590 		case CTRL_EXT_LINK_MODE_1000KX:
   2591 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2592 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2593 			break;
   2594 		case CTRL_EXT_LINK_MODE_SGMII:
   2595 			if (wm_sgmii_uses_mdio(sc)) {
   2596 				aprint_verbose_dev(sc->sc_dev,
   2597 				    "SGMII(MDIO)\n");
   2598 				sc->sc_flags |= WM_F_SGMII;
   2599 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2600 				break;
   2601 			}
   2602 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2603 			/*FALLTHROUGH*/
   2604 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2605 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2606 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2607 				if (link_mode
   2608 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2609 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2610 					sc->sc_flags |= WM_F_SGMII;
   2611 				} else {
   2612 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2613 					aprint_verbose_dev(sc->sc_dev,
   2614 					    "SERDES\n");
   2615 				}
   2616 				break;
   2617 			}
   2618 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2619 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2620 
   2621 			/* Change current link mode setting */
   2622 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2623 			switch (sc->sc_mediatype) {
   2624 			case WM_MEDIATYPE_COPPER:
   2625 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2626 				break;
   2627 			case WM_MEDIATYPE_SERDES:
   2628 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2629 				break;
   2630 			default:
   2631 				break;
   2632 			}
   2633 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2634 			break;
   2635 		case CTRL_EXT_LINK_MODE_GMII:
   2636 		default:
   2637 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2638 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2639 			break;
   2640 		}
   2641 
    2642 		/* The I2C interface is used only in SGMII mode. */
    2643 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2644 			reg |= CTRL_EXT_I2C_ENA;
    2645 		else
    2646 			reg &= ~CTRL_EXT_I2C_ENA;
   2647 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2648 	} else if (sc->sc_type < WM_T_82543 ||
   2649 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2650 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2651 			aprint_error_dev(sc->sc_dev,
   2652 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2653 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2654 		}
   2655 	} else {
   2656 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2657 			aprint_error_dev(sc->sc_dev,
   2658 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2659 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2660 		}
   2661 	}
   2662 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2663 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2664 
   2665 	/* Set device properties (macflags) */
   2666 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2667 
   2668 	/* Initialize the media structures accordingly. */
   2669 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2670 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2671 	else
   2672 		wm_tbi_mediainit(sc); /* All others */
   2673 
   2674 	ifp = &sc->sc_ethercom.ec_if;
   2675 	xname = device_xname(sc->sc_dev);
   2676 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2677 	ifp->if_softc = sc;
   2678 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2679 #ifdef WM_MPSAFE
   2680 	ifp->if_extflags = IFEF_MPSAFE;
   2681 #endif
   2682 	ifp->if_ioctl = wm_ioctl;
   2683 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2684 		ifp->if_start = wm_nq_start;
   2685 		/*
    2686 		 * When there is only one CPU and the controller can use
    2687 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2688 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2689 		 * the other for link status changes.
    2690 		 * In this situation, wm_nq_transmit() is disadvantageous
    2691 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2692 		 */
   2693 		if (wm_is_using_multiqueue(sc))
   2694 			ifp->if_transmit = wm_nq_transmit;
   2695 	} else {
   2696 		ifp->if_start = wm_start;
   2697 		/*
    2698 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2699 		 */
   2700 		if (wm_is_using_multiqueue(sc))
   2701 			ifp->if_transmit = wm_transmit;
   2702 	}
    2703 	/* wm(4) doesn't use ifp->if_watchdog; wm_tick() acts as the watchdog. */
   2704 	ifp->if_init = wm_init;
   2705 	ifp->if_stop = wm_stop;
   2706 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2707 	IFQ_SET_READY(&ifp->if_snd);
   2708 
    2709 	/* Check for jumbo frame support */
   2710 	switch (sc->sc_type) {
   2711 	case WM_T_82573:
   2712 		/* XXX limited to 9234 if ASPM is disabled */
   2713 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2714 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2715 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2716 		break;
   2717 	case WM_T_82571:
   2718 	case WM_T_82572:
   2719 	case WM_T_82574:
   2720 	case WM_T_82583:
   2721 	case WM_T_82575:
   2722 	case WM_T_82576:
   2723 	case WM_T_82580:
   2724 	case WM_T_I350:
   2725 	case WM_T_I354:
   2726 	case WM_T_I210:
   2727 	case WM_T_I211:
   2728 	case WM_T_80003:
   2729 	case WM_T_ICH9:
   2730 	case WM_T_ICH10:
   2731 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2732 	case WM_T_PCH_LPT:
   2733 	case WM_T_PCH_SPT:
   2734 		/* XXX limited to 9234 */
   2735 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2736 		break;
   2737 	case WM_T_PCH:
   2738 		/* XXX limited to 4096 */
   2739 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2740 		break;
   2741 	case WM_T_82542_2_0:
   2742 	case WM_T_82542_2_1:
   2743 	case WM_T_ICH8:
   2744 		/* No support for jumbo frame */
   2745 		break;
   2746 	default:
   2747 		/* ETHER_MAX_LEN_JUMBO */
   2748 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2749 		break;
   2750 	}
   2751 
    2752 	/* If we're an i82543 or greater, we can support VLANs. */
   2753 	if (sc->sc_type >= WM_T_82543)
   2754 		sc->sc_ethercom.ec_capabilities |=
   2755 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2756 
   2757 	/*
    2758 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
    2759 	 * on the i82543 and later.
   2760 	 */
   2761 	if (sc->sc_type >= WM_T_82543) {
   2762 		ifp->if_capabilities |=
   2763 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2764 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2765 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2766 		    IFCAP_CSUM_TCPv6_Tx |
   2767 		    IFCAP_CSUM_UDPv6_Tx;
   2768 	}
   2769 
   2770 	/*
   2771 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2772 	 *
   2773 	 *	82541GI (8086:1076) ... no
   2774 	 *	82572EI (8086:10b9) ... yes
   2775 	 */
   2776 	if (sc->sc_type >= WM_T_82571) {
   2777 		ifp->if_capabilities |=
   2778 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2779 	}
   2780 
   2781 	/*
   2782 	 * If we're a i82544 or greater (except i82547), we can do
   2783 	 * TCP segmentation offload.
   2784 	 */
   2785 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2786 		ifp->if_capabilities |= IFCAP_TSOv4;
   2787 	}
   2788 
   2789 	if (sc->sc_type >= WM_T_82571) {
   2790 		ifp->if_capabilities |= IFCAP_TSOv6;
   2791 	}
   2792 
   2793 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2794 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2795 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2796 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2797 
   2798 #ifdef WM_MPSAFE
   2799 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2800 #else
   2801 	sc->sc_core_lock = NULL;
   2802 #endif
   2803 
   2804 	/* Attach the interface. */
   2805 	error = if_initialize(ifp);
   2806 	if (error != 0) {
   2807 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2808 		    error);
   2809 		return; /* Error */
   2810 	}
   2811 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2812 	ether_ifattach(ifp, enaddr);
   2813 	if_register(ifp);
   2814 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2815 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2816 			  RND_FLAG_DEFAULT);
   2817 
   2818 #ifdef WM_EVENT_COUNTERS
   2819 	/* Attach event counters. */
   2820 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2821 	    NULL, xname, "linkintr");
   2822 
   2823 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2824 	    NULL, xname, "tx_xoff");
   2825 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2826 	    NULL, xname, "tx_xon");
   2827 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2828 	    NULL, xname, "rx_xoff");
   2829 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2830 	    NULL, xname, "rx_xon");
   2831 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2832 	    NULL, xname, "rx_macctl");
   2833 #endif /* WM_EVENT_COUNTERS */
   2834 
   2835 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2836 		pmf_class_network_register(self, ifp);
   2837 	else
   2838 		aprint_error_dev(self, "couldn't establish power handler\n");
   2839 
   2840 	sc->sc_flags |= WM_F_ATTACHED;
   2841  out:
   2842 	return;
   2843 }
   2844 
   2845 /* The detach function (ca_detach) */
   2846 static int
   2847 wm_detach(device_t self, int flags __unused)
   2848 {
   2849 	struct wm_softc *sc = device_private(self);
   2850 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2851 	int i;
   2852 
   2853 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2854 		return 0;
   2855 
    2856 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   2857 	wm_stop(ifp, 1);
   2858 
   2859 	pmf_device_deregister(self);
   2860 
   2861 #ifdef WM_EVENT_COUNTERS
   2862 	evcnt_detach(&sc->sc_ev_linkintr);
   2863 
   2864 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2865 	evcnt_detach(&sc->sc_ev_tx_xon);
   2866 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2867 	evcnt_detach(&sc->sc_ev_rx_xon);
   2868 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2869 #endif /* WM_EVENT_COUNTERS */
   2870 
   2871 	/* Tell the firmware about the release */
   2872 	WM_CORE_LOCK(sc);
   2873 	wm_release_manageability(sc);
   2874 	wm_release_hw_control(sc);
   2875 	wm_enable_wakeup(sc);
   2876 	WM_CORE_UNLOCK(sc);
   2877 
   2878 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2879 
   2880 	/* Delete all remaining media. */
   2881 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2882 
   2883 	ether_ifdetach(ifp);
   2884 	if_detach(ifp);
   2885 	if_percpuq_destroy(sc->sc_ipq);
   2886 
   2887 	/* Unload RX dmamaps and free mbufs */
   2888 	for (i = 0; i < sc->sc_nqueues; i++) {
   2889 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2890 		mutex_enter(rxq->rxq_lock);
   2891 		wm_rxdrain(rxq);
   2892 		mutex_exit(rxq->rxq_lock);
   2893 	}
   2894 	/* Must unlock here */
   2895 
   2896 	/* Disestablish the interrupt handler */
   2897 	for (i = 0; i < sc->sc_nintrs; i++) {
   2898 		if (sc->sc_ihs[i] != NULL) {
   2899 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2900 			sc->sc_ihs[i] = NULL;
   2901 		}
   2902 	}
   2903 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2904 
   2905 	wm_free_txrx_queues(sc);
   2906 
   2907 	/* Unmap the registers */
   2908 	if (sc->sc_ss) {
   2909 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2910 		sc->sc_ss = 0;
   2911 	}
   2912 	if (sc->sc_ios) {
   2913 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2914 		sc->sc_ios = 0;
   2915 	}
   2916 	if (sc->sc_flashs) {
   2917 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2918 		sc->sc_flashs = 0;
   2919 	}
   2920 
   2921 	if (sc->sc_core_lock)
   2922 		mutex_obj_free(sc->sc_core_lock);
   2923 	if (sc->sc_ich_phymtx)
   2924 		mutex_obj_free(sc->sc_ich_phymtx);
   2925 	if (sc->sc_ich_nvmmtx)
   2926 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2927 
   2928 	return 0;
   2929 }
   2930 
   2931 static bool
   2932 wm_suspend(device_t self, const pmf_qual_t *qual)
   2933 {
   2934 	struct wm_softc *sc = device_private(self);
   2935 
   2936 	wm_release_manageability(sc);
   2937 	wm_release_hw_control(sc);
   2938 	wm_enable_wakeup(sc);
   2939 
   2940 	return true;
   2941 }
   2942 
   2943 static bool
   2944 wm_resume(device_t self, const pmf_qual_t *qual)
   2945 {
   2946 	struct wm_softc *sc = device_private(self);
   2947 
   2948 	/* Disable ASPM L0s and/or L1 for workaround */
   2949 	wm_disable_aspm(sc);
   2950 	wm_init_manageability(sc);
   2951 
   2952 	return true;
   2953 }
   2954 
   2955 /*
   2956  * wm_watchdog:		[ifnet interface function]
   2957  *
   2958  *	Watchdog timer handler.
   2959  */
   2960 static void
   2961 wm_watchdog(struct ifnet *ifp)
   2962 {
   2963 	int qid;
   2964 	struct wm_softc *sc = ifp->if_softc;
    2965 	uint16_t hang_queue = 0; /* Max number of queues is 16 (82576). */
   2966 
   2967 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2968 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2969 
   2970 		wm_watchdog_txq(ifp, txq, &hang_queue);
   2971 	}
   2972 
    2973 	/*
    2974 	 * If any of the queues hung up, reset the interface.
    2975 	 */
   2976 	if (hang_queue != 0) {
   2977 		(void) wm_init(ifp);
   2978 
    2979 		/*
    2980 		 * There are still some upper layer processing paths which
    2981 		 * call ifp->if_start(), e.g. ALTQ or a single CPU system.
    2982 		 */
   2983 		/* Try to get more packets going. */
   2984 		ifp->if_start(ifp);
   2985 	}
   2986 }
   2987 
   2988 
   2989 static void
   2990 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   2991 {
   2992 
   2993 	mutex_enter(txq->txq_lock);
   2994 	if (txq->txq_watchdog &&
   2995 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   2996 		wm_watchdog_txq_locked(ifp, txq, hang);
   2997 	}
   2998 	mutex_exit(txq->txq_lock);
   2999 }
   3000 
   3001 static void
   3002 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3003 {
   3004 	struct wm_softc *sc = ifp->if_softc;
   3005 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3006 
   3007 	KASSERT(mutex_owned(txq->txq_lock));
   3008 
   3009 	/*
   3010 	 * Since we're using delayed interrupts, sweep up
   3011 	 * before we report an error.
   3012 	 */
   3013 	wm_txeof(txq, UINT_MAX);
   3014 	if (txq->txq_watchdog)
   3015 		*hang |= __BIT(wmq->wmq_id);
   3016 
   3017 	if (txq->txq_free != WM_NTXDESC(txq)) {
   3018 #ifdef WM_DEBUG
   3019 		int i, j;
   3020 		struct wm_txsoft *txs;
   3021 #endif
   3022 		log(LOG_ERR,
   3023 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3024 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3025 		    txq->txq_next);
   3026 		ifp->if_oerrors++;
   3027 #ifdef WM_DEBUG
   3028 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   3029 		    i = WM_NEXTTXS(txq, i)) {
   3030 		    txs = &txq->txq_soft[i];
   3031 		    printf("txs %d tx %d -> %d\n",
   3032 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3033 		    for (j = txs->txs_firstdesc; ;
   3034 			j = WM_NEXTTX(txq, j)) {
   3035 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3036 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3037 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3038 				    printf("\t %#08x%08x\n",
   3039 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3040 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3041 			    } else {
   3042 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3043 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3044 					txq->txq_descs[j].wtx_addr.wa_low);
   3045 				    printf("\t %#04x%02x%02x%08x\n",
   3046 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3047 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3048 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3049 					txq->txq_descs[j].wtx_cmdlen);
   3050 			    }
   3051 			if (j == txs->txs_lastdesc)
   3052 				break;
   3053 			}
   3054 		}
   3055 #endif
   3056 	}
   3057 }
   3058 
   3059 /*
   3060  * wm_tick:
   3061  *
   3062  *	One second timer, used to check link status, sweep up
   3063  *	completed transmit jobs, etc.
   3064  */
   3065 static void
   3066 wm_tick(void *arg)
   3067 {
   3068 	struct wm_softc *sc = arg;
   3069 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3070 #ifndef WM_MPSAFE
   3071 	int s = splnet();
   3072 #endif
   3073 
   3074 	WM_CORE_LOCK(sc);
   3075 
   3076 	if (sc->sc_core_stopping) {
   3077 		WM_CORE_UNLOCK(sc);
   3078 #ifndef WM_MPSAFE
   3079 		splx(s);
   3080 #endif
   3081 		return;
   3082 	}
   3083 
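         	/* XON/XOFF pause frame and MAC control frame statistics. */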
   3084 	if (sc->sc_type >= WM_T_82542_2_1) {
   3085 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3086 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3087 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3088 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3089 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3090 	}
   3091 
   3092 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3093 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3094 	    + CSR_READ(sc, WMREG_CRCERRS)
   3095 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3096 	    + CSR_READ(sc, WMREG_SYMERRC)
   3097 	    + CSR_READ(sc, WMREG_RXERRC)
   3098 	    + CSR_READ(sc, WMREG_SEC)
   3099 	    + CSR_READ(sc, WMREG_CEXTERR)
   3100 	    + CSR_READ(sc, WMREG_RLEC);
    3101 	/*
    3102 	 * WMREG_RNBC is incremented when there are no available buffers
    3103 	 * in host memory. It does not mean the number of dropped packets,
    3104 	 * because the ethernet controller can receive packets in such a
    3105 	 * case if there is space in the PHY's FIFO.
    3106 	 *
    3107 	 * If you want to count WMREG_RNBC events, you should use a
    3108 	 * dedicated EVCNT instead of if_iqdrops.
    3109 	 */
   3110 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3111 
   3112 	if (sc->sc_flags & WM_F_HAS_MII)
   3113 		mii_tick(&sc->sc_mii);
   3114 	else if ((sc->sc_type >= WM_T_82575)
   3115 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3116 		wm_serdes_tick(sc);
   3117 	else
   3118 		wm_tbi_tick(sc);
   3119 
   3120 	WM_CORE_UNLOCK(sc);
   3121 
   3122 	wm_watchdog(ifp);
   3123 
   3124 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3125 }
   3126 
   3127 static int
   3128 wm_ifflags_cb(struct ethercom *ec)
   3129 {
   3130 	struct ifnet *ifp = &ec->ec_if;
   3131 	struct wm_softc *sc = ifp->if_softc;
   3132 	int rc = 0;
   3133 
   3134 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3135 		device_xname(sc->sc_dev), __func__));
   3136 
   3137 	WM_CORE_LOCK(sc);
   3138 
   3139 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3140 	sc->sc_if_flags = ifp->if_flags;
   3141 
   3142 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3143 		rc = ENETRESET;
   3144 		goto out;
   3145 	}
   3146 
   3147 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3148 		wm_set_filter(sc);
   3149 
   3150 	wm_set_vlan(sc);
   3151 
   3152 out:
   3153 	WM_CORE_UNLOCK(sc);
   3154 
   3155 	return rc;
   3156 }
   3157 
   3158 /*
   3159  * wm_ioctl:		[ifnet interface function]
   3160  *
   3161  *	Handle control requests from the operator.
   3162  */
   3163 static int
   3164 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3165 {
   3166 	struct wm_softc *sc = ifp->if_softc;
   3167 	struct ifreq *ifr = (struct ifreq *) data;
   3168 	struct ifaddr *ifa = (struct ifaddr *)data;
   3169 	struct sockaddr_dl *sdl;
   3170 	int s, error;
   3171 
   3172 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3173 		device_xname(sc->sc_dev), __func__));
   3174 
   3175 #ifndef WM_MPSAFE
   3176 	s = splnet();
   3177 #endif
   3178 	switch (cmd) {
   3179 	case SIOCSIFMEDIA:
   3180 	case SIOCGIFMEDIA:
   3181 		WM_CORE_LOCK(sc);
   3182 		/* Flow control requires full-duplex mode. */
   3183 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3184 		    (ifr->ifr_media & IFM_FDX) == 0)
   3185 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3186 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3187 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3188 				/* We can do both TXPAUSE and RXPAUSE. */
   3189 				ifr->ifr_media |=
   3190 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3191 			}
   3192 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3193 		}
   3194 		WM_CORE_UNLOCK(sc);
   3195 #ifdef WM_MPSAFE
   3196 		s = splnet();
   3197 #endif
   3198 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3199 #ifdef WM_MPSAFE
   3200 		splx(s);
   3201 #endif
   3202 		break;
   3203 	case SIOCINITIFADDR:
   3204 		WM_CORE_LOCK(sc);
   3205 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3206 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3207 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3208 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3209 			/* unicast address is first multicast entry */
   3210 			wm_set_filter(sc);
   3211 			error = 0;
   3212 			WM_CORE_UNLOCK(sc);
   3213 			break;
   3214 		}
   3215 		WM_CORE_UNLOCK(sc);
   3216 		/*FALLTHROUGH*/
   3217 	default:
   3218 #ifdef WM_MPSAFE
   3219 		s = splnet();
   3220 #endif
   3221 		/* It may call wm_start, so unlock here */
   3222 		error = ether_ioctl(ifp, cmd, data);
   3223 #ifdef WM_MPSAFE
   3224 		splx(s);
   3225 #endif
   3226 		if (error != ENETRESET)
   3227 			break;
   3228 
   3229 		error = 0;
   3230 
   3231 		if (cmd == SIOCSIFCAP) {
   3232 			error = (*ifp->if_init)(ifp);
   3233 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3234 			;
   3235 		else if (ifp->if_flags & IFF_RUNNING) {
   3236 			/*
   3237 			 * Multicast list has changed; set the hardware filter
   3238 			 * accordingly.
   3239 			 */
   3240 			WM_CORE_LOCK(sc);
   3241 			wm_set_filter(sc);
   3242 			WM_CORE_UNLOCK(sc);
   3243 		}
   3244 		break;
   3245 	}
   3246 
   3247 #ifndef WM_MPSAFE
   3248 	splx(s);
   3249 #endif
   3250 	return error;
   3251 }
   3252 
   3253 /* MAC address related */
   3254 
   3255 /*
    3256  * Get the offset of the MAC address and return it.
    3257  * If an error occurs, use offset 0.
   3258  */
   3259 static uint16_t
   3260 wm_check_alt_mac_addr(struct wm_softc *sc)
   3261 {
   3262 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3263 	uint16_t offset = NVM_OFF_MACADDR;
   3264 
   3265 	/* Try to read alternative MAC address pointer */
   3266 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3267 		return 0;
   3268 
    3269 	/* Check whether the pointer is valid or not. */
   3270 	if ((offset == 0x0000) || (offset == 0xffff))
   3271 		return 0;
   3272 
   3273 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3274 	/*
    3275 	 * Check whether the alternative MAC address is valid or not.
    3276 	 * Some cards have a non-0xffff pointer but don't actually use
    3277 	 * an alternative MAC address. A valid address must have the
    3278 	 * multicast bit (the LSB of the first octet) clear, so check
    3279 	 * that bit.
    3280 	 */
   3281 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3282 		if (((myea[0] & 0xff) & 0x01) == 0)
   3283 			return offset; /* Found */
   3284 
   3285 	/* Not found */
   3286 	return 0;
   3287 }
   3288 
   3289 static int
   3290 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3291 {
   3292 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3293 	uint16_t offset = NVM_OFF_MACADDR;
   3294 	int do_invert = 0;
   3295 
   3296 	switch (sc->sc_type) {
   3297 	case WM_T_82580:
   3298 	case WM_T_I350:
   3299 	case WM_T_I354:
   3300 		/* EEPROM Top Level Partitioning */
   3301 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3302 		break;
   3303 	case WM_T_82571:
   3304 	case WM_T_82575:
   3305 	case WM_T_82576:
   3306 	case WM_T_80003:
   3307 	case WM_T_I210:
   3308 	case WM_T_I211:
   3309 		offset = wm_check_alt_mac_addr(sc);
   3310 		if (offset == 0)
   3311 			if ((sc->sc_funcid & 0x01) == 1)
   3312 				do_invert = 1;
   3313 		break;
   3314 	default:
   3315 		if ((sc->sc_funcid & 0x01) == 1)
   3316 			do_invert = 1;
   3317 		break;
   3318 	}
   3319 
   3320 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3321 		goto bad;
   3322 
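         	/* Each NVM word holds two octets of the address, low byte first. */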
   3323 	enaddr[0] = myea[0] & 0xff;
   3324 	enaddr[1] = myea[0] >> 8;
   3325 	enaddr[2] = myea[1] & 0xff;
   3326 	enaddr[3] = myea[1] >> 8;
   3327 	enaddr[4] = myea[2] & 0xff;
   3328 	enaddr[5] = myea[2] >> 8;
   3329 
   3330 	/*
   3331 	 * Toggle the LSB of the MAC address on the second port
   3332 	 * of some dual port cards.
   3333 	 */
   3334 	if (do_invert != 0)
   3335 		enaddr[5] ^= 1;
   3336 
   3337 	return 0;
   3338 
   3339  bad:
   3340 	return -1;
   3341 }
   3342 
   3343 /*
   3344  * wm_set_ral:
   3345  *
    3346  *	Set an entry in the receive address list.
   3347  */
   3348 static void
   3349 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3350 {
   3351 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3352 	uint32_t wlock_mac;
   3353 	int rv;
   3354 
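         	/*
         	 * RAL holds the 32 low-order bits of the station address and
         	 * RAH the 16 high-order bits plus the Address Valid (AV) flag.
         	 */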
   3355 	if (enaddr != NULL) {
   3356 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3357 		    (enaddr[3] << 24);
   3358 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3359 		ral_hi |= RAL_AV;
   3360 	} else {
   3361 		ral_lo = 0;
   3362 		ral_hi = 0;
   3363 	}
   3364 
   3365 	switch (sc->sc_type) {
   3366 	case WM_T_82542_2_0:
   3367 	case WM_T_82542_2_1:
   3368 	case WM_T_82543:
   3369 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3370 		CSR_WRITE_FLUSH(sc);
   3371 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3372 		CSR_WRITE_FLUSH(sc);
   3373 		break;
   3374 	case WM_T_PCH2:
   3375 	case WM_T_PCH_LPT:
   3376 	case WM_T_PCH_SPT:
   3377 		if (idx == 0) {
   3378 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3379 			CSR_WRITE_FLUSH(sc);
   3380 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3381 			CSR_WRITE_FLUSH(sc);
   3382 			return;
   3383 		}
   3384 		if (sc->sc_type != WM_T_PCH2) {
   3385 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3386 			    FWSM_WLOCK_MAC);
   3387 			addrl = WMREG_SHRAL(idx - 1);
   3388 			addrh = WMREG_SHRAH(idx - 1);
   3389 		} else {
   3390 			wlock_mac = 0;
   3391 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3392 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3393 		}
   3394 
   3395 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3396 			rv = wm_get_swflag_ich8lan(sc);
   3397 			if (rv != 0)
   3398 				return;
   3399 			CSR_WRITE(sc, addrl, ral_lo);
   3400 			CSR_WRITE_FLUSH(sc);
   3401 			CSR_WRITE(sc, addrh, ral_hi);
   3402 			CSR_WRITE_FLUSH(sc);
   3403 			wm_put_swflag_ich8lan(sc);
   3404 		}
   3405 
   3406 		break;
   3407 	default:
   3408 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3409 		CSR_WRITE_FLUSH(sc);
   3410 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3411 		CSR_WRITE_FLUSH(sc);
   3412 		break;
   3413 	}
   3414 }
   3415 
   3416 /*
   3417  * wm_mchash:
   3418  *
   3419  *	Compute the hash of the multicast address for the 4096-bit
   3420  *	multicast filter.
   3421  */
   3422 static uint32_t
   3423 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3424 {
   3425 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3426 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3427 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3428 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3429 	uint32_t hash;
   3430 
   3431 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3432 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3433 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3434 	    || (sc->sc_type == WM_T_PCH_SPT)) {
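         		/*
         		 * ICH/PCH chips use a 10-bit hash (1024-bit multicast
         		 * filter table); the others use 12 bits (4096 bits).
         		 */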
   3435 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3436 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3437 		return (hash & 0x3ff);
   3438 	}
   3439 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3440 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3441 
   3442 	return (hash & 0xfff);
   3443 }
   3444 
   3445 /*
   3446  * wm_set_filter:
   3447  *
   3448  *	Set up the receive filter.
   3449  */
   3450 static void
   3451 wm_set_filter(struct wm_softc *sc)
   3452 {
   3453 	struct ethercom *ec = &sc->sc_ethercom;
   3454 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3455 	struct ether_multi *enm;
   3456 	struct ether_multistep step;
   3457 	bus_addr_t mta_reg;
   3458 	uint32_t hash, reg, bit;
   3459 	int i, size, ralmax;
   3460 
   3461 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3462 		device_xname(sc->sc_dev), __func__));
   3463 
   3464 	if (sc->sc_type >= WM_T_82544)
   3465 		mta_reg = WMREG_CORDOVA_MTA;
   3466 	else
   3467 		mta_reg = WMREG_MTA;
   3468 
   3469 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3470 
   3471 	if (ifp->if_flags & IFF_BROADCAST)
   3472 		sc->sc_rctl |= RCTL_BAM;
   3473 	if (ifp->if_flags & IFF_PROMISC) {
   3474 		sc->sc_rctl |= RCTL_UPE;
   3475 		goto allmulti;
   3476 	}
   3477 
   3478 	/*
   3479 	 * Set the station address in the first RAL slot, and
   3480 	 * clear the remaining slots.
   3481 	 */
   3482 	if (sc->sc_type == WM_T_ICH8)
    3483 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3484 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3485 	    || (sc->sc_type == WM_T_PCH))
   3486 		size = WM_RAL_TABSIZE_ICH8;
   3487 	else if (sc->sc_type == WM_T_PCH2)
   3488 		size = WM_RAL_TABSIZE_PCH2;
    3489 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3490 		size = WM_RAL_TABSIZE_PCH_LPT;
   3491 	else if (sc->sc_type == WM_T_82575)
   3492 		size = WM_RAL_TABSIZE_82575;
   3493 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3494 		size = WM_RAL_TABSIZE_82576;
   3495 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3496 		size = WM_RAL_TABSIZE_I350;
   3497 	else
   3498 		size = WM_RAL_TABSIZE;
   3499 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3500 
   3501 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3502 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3503 		switch (i) {
   3504 		case 0:
   3505 			/* We can use all entries */
   3506 			ralmax = size;
   3507 			break;
   3508 		case 1:
   3509 			/* Only RAR[0] */
   3510 			ralmax = 1;
   3511 			break;
   3512 		default:
   3513 			/* available SHRA + RAR[0] */
   3514 			ralmax = i + 1;
   3515 		}
   3516 	} else
   3517 		ralmax = size;
   3518 	for (i = 1; i < size; i++) {
   3519 		if (i < ralmax)
   3520 			wm_set_ral(sc, NULL, i);
   3521 	}
   3522 
   3523 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3524 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3525 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3526 	    || (sc->sc_type == WM_T_PCH_SPT))
   3527 		size = WM_ICH8_MC_TABSIZE;
   3528 	else
   3529 		size = WM_MC_TABSIZE;
   3530 	/* Clear out the multicast table. */
   3531 	for (i = 0; i < size; i++) {
   3532 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3533 		CSR_WRITE_FLUSH(sc);
   3534 	}
   3535 
   3536 	ETHER_LOCK(ec);
   3537 	ETHER_FIRST_MULTI(step, ec, enm);
   3538 	while (enm != NULL) {
   3539 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3540 			ETHER_UNLOCK(ec);
   3541 			/*
   3542 			 * We must listen to a range of multicast addresses.
   3543 			 * For now, just accept all multicasts, rather than
   3544 			 * trying to set only those filter bits needed to match
   3545 			 * the range.  (At this time, the only use of address
   3546 			 * ranges is for IP multicast routing, for which the
   3547 			 * range is big enough to require all bits set.)
   3548 			 */
   3549 			goto allmulti;
   3550 		}
   3551 
   3552 		hash = wm_mchash(sc, enm->enm_addrlo);
   3553 
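         		/* Upper hash bits select the MTA word, low 5 bits the bit. */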
   3554 		reg = (hash >> 5);
   3555 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3556 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3557 		    || (sc->sc_type == WM_T_PCH2)
   3558 		    || (sc->sc_type == WM_T_PCH_LPT)
   3559 		    || (sc->sc_type == WM_T_PCH_SPT))
   3560 			reg &= 0x1f;
   3561 		else
   3562 			reg &= 0x7f;
   3563 		bit = hash & 0x1f;
   3564 
   3565 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3566 		hash |= 1U << bit;
   3567 
   3568 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3569 			/*
    3570 			 * 82544 Errata 9: Certain registers cannot be written
   3571 			 * with particular alignments in PCI-X bus operation
   3572 			 * (FCAH, MTA and VFTA).
   3573 			 */
   3574 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3575 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3576 			CSR_WRITE_FLUSH(sc);
   3577 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3578 			CSR_WRITE_FLUSH(sc);
   3579 		} else {
   3580 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3581 			CSR_WRITE_FLUSH(sc);
   3582 		}
   3583 
   3584 		ETHER_NEXT_MULTI(step, enm);
   3585 	}
   3586 	ETHER_UNLOCK(ec);
   3587 
   3588 	ifp->if_flags &= ~IFF_ALLMULTI;
   3589 	goto setit;
   3590 
   3591  allmulti:
   3592 	ifp->if_flags |= IFF_ALLMULTI;
   3593 	sc->sc_rctl |= RCTL_MPE;
   3594 
   3595  setit:
   3596 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3597 }
   3598 
   3599 /* Reset and init related */
   3600 
   3601 static void
   3602 wm_set_vlan(struct wm_softc *sc)
   3603 {
   3604 
   3605 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3606 		device_xname(sc->sc_dev), __func__));
   3607 
   3608 	/* Deal with VLAN enables. */
   3609 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3610 		sc->sc_ctrl |= CTRL_VME;
   3611 	else
   3612 		sc->sc_ctrl &= ~CTRL_VME;
   3613 
   3614 	/* Write the control registers. */
   3615 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3616 }
   3617 
   3618 static void
   3619 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3620 {
   3621 	uint32_t gcr;
   3622 	pcireg_t ctrl2;
   3623 
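         	/*
         	 * Use a 10ms timeout via GCR on devices without capability
         	 * version 2, otherwise 16ms via PCIe Device Control 2.
         	 */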
   3624 	gcr = CSR_READ(sc, WMREG_GCR);
   3625 
   3626 	/* Only take action if timeout value is defaulted to 0 */
   3627 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3628 		goto out;
   3629 
   3630 	if ((gcr & GCR_CAP_VER2) == 0) {
   3631 		gcr |= GCR_CMPL_TMOUT_10MS;
   3632 		goto out;
   3633 	}
   3634 
   3635 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3636 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3637 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3638 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3639 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3640 
   3641 out:
   3642 	/* Disable completion timeout resend */
   3643 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3644 
   3645 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3646 }
   3647 
   3648 void
   3649 wm_get_auto_rd_done(struct wm_softc *sc)
   3650 {
   3651 	int i;
   3652 
    3653 	/* Wait for eeprom to reload */
   3654 	switch (sc->sc_type) {
   3655 	case WM_T_82571:
   3656 	case WM_T_82572:
   3657 	case WM_T_82573:
   3658 	case WM_T_82574:
   3659 	case WM_T_82583:
   3660 	case WM_T_82575:
   3661 	case WM_T_82576:
   3662 	case WM_T_82580:
   3663 	case WM_T_I350:
   3664 	case WM_T_I354:
   3665 	case WM_T_I210:
   3666 	case WM_T_I211:
   3667 	case WM_T_80003:
   3668 	case WM_T_ICH8:
   3669 	case WM_T_ICH9:
   3670 		for (i = 0; i < 10; i++) {
   3671 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3672 				break;
   3673 			delay(1000);
   3674 		}
   3675 		if (i == 10) {
   3676 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3677 			    "complete\n", device_xname(sc->sc_dev));
   3678 		}
   3679 		break;
   3680 	default:
   3681 		break;
   3682 	}
   3683 }
   3684 
   3685 void
   3686 wm_lan_init_done(struct wm_softc *sc)
   3687 {
   3688 	uint32_t reg = 0;
   3689 	int i;
   3690 
   3691 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3692 		device_xname(sc->sc_dev), __func__));
   3693 
   3694 	/* Wait for eeprom to reload */
   3695 	switch (sc->sc_type) {
   3696 	case WM_T_ICH10:
   3697 	case WM_T_PCH:
   3698 	case WM_T_PCH2:
   3699 	case WM_T_PCH_LPT:
   3700 	case WM_T_PCH_SPT:
   3701 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3702 			reg = CSR_READ(sc, WMREG_STATUS);
   3703 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3704 				break;
   3705 			delay(100);
   3706 		}
   3707 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3708 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3709 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3710 		}
   3711 		break;
   3712 	default:
   3713 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3714 		    __func__);
   3715 		break;
   3716 	}
   3717 
   3718 	reg &= ~STATUS_LAN_INIT_DONE;
   3719 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3720 }
   3721 
   3722 void
   3723 wm_get_cfg_done(struct wm_softc *sc)
   3724 {
   3725 	int mask;
   3726 	uint32_t reg;
   3727 	int i;
   3728 
   3729 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3730 		device_xname(sc->sc_dev), __func__));
   3731 
   3732 	/* Wait for eeprom to reload */
   3733 	switch (sc->sc_type) {
   3734 	case WM_T_82542_2_0:
   3735 	case WM_T_82542_2_1:
   3736 		/* null */
   3737 		break;
   3738 	case WM_T_82543:
   3739 	case WM_T_82544:
   3740 	case WM_T_82540:
   3741 	case WM_T_82545:
   3742 	case WM_T_82545_3:
   3743 	case WM_T_82546:
   3744 	case WM_T_82546_3:
   3745 	case WM_T_82541:
   3746 	case WM_T_82541_2:
   3747 	case WM_T_82547:
   3748 	case WM_T_82547_2:
   3749 	case WM_T_82573:
   3750 	case WM_T_82574:
   3751 	case WM_T_82583:
   3752 		/* generic */
   3753 		delay(10*1000);
   3754 		break;
   3755 	case WM_T_80003:
   3756 	case WM_T_82571:
   3757 	case WM_T_82572:
   3758 	case WM_T_82575:
   3759 	case WM_T_82576:
   3760 	case WM_T_82580:
   3761 	case WM_T_I350:
   3762 	case WM_T_I354:
   3763 	case WM_T_I210:
   3764 	case WM_T_I211:
   3765 		if (sc->sc_type == WM_T_82571) {
   3766 			/* Only 82571 shares port 0 */
   3767 			mask = EEMNGCTL_CFGDONE_0;
   3768 		} else
   3769 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3770 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3771 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3772 				break;
   3773 			delay(1000);
   3774 		}
   3775 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3776 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3777 				device_xname(sc->sc_dev), __func__));
   3778 		}
   3779 		break;
   3780 	case WM_T_ICH8:
   3781 	case WM_T_ICH9:
   3782 	case WM_T_ICH10:
   3783 	case WM_T_PCH:
   3784 	case WM_T_PCH2:
   3785 	case WM_T_PCH_LPT:
   3786 	case WM_T_PCH_SPT:
   3787 		delay(10*1000);
   3788 		if (sc->sc_type >= WM_T_ICH10)
   3789 			wm_lan_init_done(sc);
   3790 		else
   3791 			wm_get_auto_rd_done(sc);
   3792 
   3793 		reg = CSR_READ(sc, WMREG_STATUS);
   3794 		if ((reg & STATUS_PHYRA) != 0)
   3795 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3796 		break;
   3797 	default:
   3798 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3799 		    __func__);
   3800 		break;
   3801 	}
   3802 }
   3803 
   3804 void
   3805 wm_phy_post_reset(struct wm_softc *sc)
   3806 {
   3807 	uint32_t reg;
   3808 
   3809 	/* This function is only for ICH8 and newer. */
   3810 	if (sc->sc_type < WM_T_ICH8)
   3811 		return;
   3812 
   3813 	if (wm_phy_resetisblocked(sc)) {
   3814 		/* XXX */
   3815 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3816 		return;
   3817 	}
   3818 
   3819 	/* Allow time for h/w to get to quiescent state after reset */
   3820 	delay(10*1000);
   3821 
   3822 	/* Perform any necessary post-reset workarounds */
   3823 	if (sc->sc_type == WM_T_PCH)
   3824 		wm_hv_phy_workaround_ich8lan(sc);
   3825 	if (sc->sc_type == WM_T_PCH2)
   3826 		wm_lv_phy_workaround_ich8lan(sc);
   3827 
   3828 	/* Clear the host wakeup bit after lcd reset */
   3829 	if (sc->sc_type >= WM_T_PCH) {
   3830 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3831 		    BM_PORT_GEN_CFG);
   3832 		reg &= ~BM_WUC_HOST_WU_BIT;
   3833 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3834 		    BM_PORT_GEN_CFG, reg);
   3835 	}
   3836 
   3837 	/* Configure the LCD with the extended configuration region in NVM */
   3838 	wm_init_lcd_from_nvm(sc);
   3839 
   3840 	/* Configure the LCD with the OEM bits in NVM */
   3841 }
   3842 
   3843 /* Only for PCH and newer */
   3844 static void
   3845 wm_write_smbus_addr(struct wm_softc *sc)
   3846 {
   3847 	uint32_t strap, freq;
   3848 	uint32_t phy_data;
   3849 
   3850 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3851 		device_xname(sc->sc_dev), __func__));
   3852 
   3853 	strap = CSR_READ(sc, WMREG_STRAP);
   3854 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3855 
   3856 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3857 
   3858 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3859 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3860 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3861 
   3862 	if (sc->sc_phytype == WMPHY_I217) {
   3863 		/* Restore SMBus frequency */
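         		/* A strap frequency value of zero means unsupported. */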
    3864 		if (freq--) {
   3865 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3866 			    | HV_SMB_ADDR_FREQ_HIGH);
   3867 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3868 			    HV_SMB_ADDR_FREQ_LOW);
   3869 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3870 			    HV_SMB_ADDR_FREQ_HIGH);
   3871 		} else {
   3872 			DPRINTF(WM_DEBUG_INIT,
    3873 			    ("%s: %s: Unsupported SMB frequency in PHY\n",
   3874 				device_xname(sc->sc_dev), __func__));
   3875 		}
   3876 	}
   3877 
   3878 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3879 }
   3880 
   3881 void
   3882 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3883 {
   3884 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3885 	uint16_t phy_page = 0;
   3886 
   3887 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3888 		device_xname(sc->sc_dev), __func__));
   3889 
   3890 	switch (sc->sc_type) {
   3891 	case WM_T_ICH8:
   3892 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3893 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3894 			return;
   3895 
   3896 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3897 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3898 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3899 			break;
   3900 		}
   3901 		/* FALLTHROUGH */
   3902 	case WM_T_PCH:
   3903 	case WM_T_PCH2:
   3904 	case WM_T_PCH_LPT:
   3905 	case WM_T_PCH_SPT:
   3906 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3907 		break;
   3908 	default:
   3909 		return;
   3910 	}
   3911 
   3912 	sc->phy.acquire(sc);
   3913 
   3914 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3915 	if ((reg & sw_cfg_mask) == 0)
   3916 		goto release;
   3917 
   3918 	/*
   3919 	 * Make sure HW does not configure LCD from PHY extended configuration
   3920 	 * before SW configuration
   3921 	 */
   3922 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3923 	if ((sc->sc_type < WM_T_PCH2)
   3924 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3925 		goto release;
   3926 
   3927 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3928 		device_xname(sc->sc_dev), __func__));
   3929 	/* word_addr is in DWORD */
   3930 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3931 
   3932 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3933 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3934 
   3935 	if (((sc->sc_type == WM_T_PCH)
   3936 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3937 	    || (sc->sc_type > WM_T_PCH)) {
   3938 		/*
   3939 		 * HW configures the SMBus address and LEDs when the OEM and
   3940 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3941 		 * are cleared, SW will configure them instead.
   3942 		 */
   3943 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3944 			device_xname(sc->sc_dev), __func__));
   3945 		wm_write_smbus_addr(sc);
   3946 
   3947 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3948 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3949 	}
   3950 
   3951 	/* Configure LCD from extended configuration region. */
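         	/* Each record is a word pair: register data, then address. */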
   3952 	for (i = 0; i < cnf_size; i++) {
   3953 		uint16_t reg_data, reg_addr;
   3954 
   3955 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3956 			goto release;
   3957 
    3958 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   3959 			goto release;
   3960 
   3961 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3962 			phy_page = reg_data;
   3963 
   3964 		reg_addr &= IGPHY_MAXREGADDR;
   3965 		reg_addr |= phy_page;
   3966 
   3967 		sc->phy.release(sc); /* XXX */
   3968 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3969 		sc->phy.acquire(sc); /* XXX */
   3970 	}
   3971 
   3972 release:
   3973 	sc->phy.release(sc);
   3974 	return;
   3975 }
   3976 
   3977 
   3978 /* Init hardware bits */
   3979 void
   3980 wm_initialize_hardware_bits(struct wm_softc *sc)
   3981 {
   3982 	uint32_t tarc0, tarc1, reg;
   3983 
   3984 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3985 		device_xname(sc->sc_dev), __func__));
   3986 
   3987 	/* For 82571 variant, 80003 and ICHs */
   3988 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3989 	    || (sc->sc_type >= WM_T_80003)) {
   3990 
   3991 		/* Transmit Descriptor Control 0 */
   3992 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3993 		reg |= TXDCTL_COUNT_DESC;
   3994 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3995 
   3996 		/* Transmit Descriptor Control 1 */
   3997 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3998 		reg |= TXDCTL_COUNT_DESC;
   3999 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4000 
   4001 		/* TARC0 */
   4002 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4003 		switch (sc->sc_type) {
   4004 		case WM_T_82571:
   4005 		case WM_T_82572:
   4006 		case WM_T_82573:
   4007 		case WM_T_82574:
   4008 		case WM_T_82583:
   4009 		case WM_T_80003:
   4010 			/* Clear bits 30..27 */
   4011 			tarc0 &= ~__BITS(30, 27);
   4012 			break;
   4013 		default:
   4014 			break;
   4015 		}
   4016 
   4017 		switch (sc->sc_type) {
   4018 		case WM_T_82571:
   4019 		case WM_T_82572:
   4020 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4021 
   4022 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4023 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4024 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4025 			/* 8257[12] Errata No.7 */
    4026 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4027 
   4028 			/* TARC1 bit 28 */
   4029 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4030 				tarc1 &= ~__BIT(28);
   4031 			else
   4032 				tarc1 |= __BIT(28);
   4033 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4034 
   4035 			/*
   4036 			 * 8257[12] Errata No.13
    4037 			 * Disable Dynamic Clock Gating.
   4038 			 */
   4039 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4040 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4041 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4042 			break;
   4043 		case WM_T_82573:
   4044 		case WM_T_82574:
   4045 		case WM_T_82583:
   4046 			if ((sc->sc_type == WM_T_82574)
   4047 			    || (sc->sc_type == WM_T_82583))
   4048 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4049 
   4050 			/* Extended Device Control */
   4051 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4052 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4053 			reg |= __BIT(22);	/* Set bit 22 */
   4054 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4055 
   4056 			/* Device Control */
   4057 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4058 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4059 
   4060 			/* PCIe Control Register */
   4061 			/*
   4062 			 * 82573 Errata (unknown).
   4063 			 *
   4064 			 * 82574 Errata 25 and 82583 Errata 12
   4065 			 * "Dropped Rx Packets":
    4066 			 *   NVM images 2.1.4 and newer do not have this bug.
   4067 			 */
   4068 			reg = CSR_READ(sc, WMREG_GCR);
   4069 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4070 			CSR_WRITE(sc, WMREG_GCR, reg);
   4071 
   4072 			if ((sc->sc_type == WM_T_82574)
   4073 			    || (sc->sc_type == WM_T_82583)) {
   4074 				/*
   4075 				 * Document says this bit must be set for
   4076 				 * proper operation.
   4077 				 */
   4078 				reg = CSR_READ(sc, WMREG_GCR);
   4079 				reg |= __BIT(22);
   4080 				CSR_WRITE(sc, WMREG_GCR, reg);
   4081 
    4082 				/*
    4083 				 * Apply a workaround for the hardware errata
    4084 				 * documented in the errata docs. It fixes an
    4085 				 * issue where some error-prone or unreliable
    4086 				 * PCIe completions occur, particularly with
    4087 				 * ASPM enabled. Without the fix, the issue
    4088 				 * can cause Tx timeouts.
    4089 				 */
   4090 				reg = CSR_READ(sc, WMREG_GCR2);
   4091 				reg |= __BIT(0);
   4092 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4093 			}
   4094 			break;
   4095 		case WM_T_80003:
   4096 			/* TARC0 */
   4097 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4098 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4099 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4100 
   4101 			/* TARC1 bit 28 */
   4102 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4103 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4104 				tarc1 &= ~__BIT(28);
   4105 			else
   4106 				tarc1 |= __BIT(28);
   4107 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4108 			break;
   4109 		case WM_T_ICH8:
   4110 		case WM_T_ICH9:
   4111 		case WM_T_ICH10:
   4112 		case WM_T_PCH:
   4113 		case WM_T_PCH2:
   4114 		case WM_T_PCH_LPT:
   4115 		case WM_T_PCH_SPT:
   4116 			/* TARC0 */
   4117 			if (sc->sc_type == WM_T_ICH8) {
   4118 				/* Set TARC0 bits 29 and 28 */
   4119 				tarc0 |= __BITS(29, 28);
   4120 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4121 				tarc0 |= __BIT(29);
    4122 				/*
    4123 				 * Drop bit 28, following Linux.
    4124 				 * See the I218/I219 spec update,
    4125 				 * "5. Buffer Overrun While the I219 is
    4126 				 * Processing DMA Transactions".
    4127 				 */
   4128 				tarc0 &= ~__BIT(28);
   4129 			}
   4130 			/* Set TARC0 bits 23,24,26,27 */
   4131 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4132 
   4133 			/* CTRL_EXT */
   4134 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4135 			reg |= __BIT(22);	/* Set bit 22 */
   4136 			/*
   4137 			 * Enable PHY low-power state when MAC is at D3
   4138 			 * w/o WoL
   4139 			 */
   4140 			if (sc->sc_type >= WM_T_PCH)
   4141 				reg |= CTRL_EXT_PHYPDEN;
   4142 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4143 
   4144 			/* TARC1 */
   4145 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4146 			/* bit 28 */
   4147 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4148 				tarc1 &= ~__BIT(28);
   4149 			else
   4150 				tarc1 |= __BIT(28);
   4151 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4152 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4153 
   4154 			/* Device Status */
   4155 			if (sc->sc_type == WM_T_ICH8) {
   4156 				reg = CSR_READ(sc, WMREG_STATUS);
   4157 				reg &= ~__BIT(31);
   4158 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4159 
   4160 			}
   4161 
   4162 			/* IOSFPC */
   4163 			if (sc->sc_type == WM_T_PCH_SPT) {
   4164 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4165 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4166 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4167 			}
    4168 			/*
    4169 			 * To work around a descriptor data corruption issue
    4170 			 * seen with NFS v2 UDP traffic, just disable the NFS
    4171 			 * filtering capability.
    4172 			 */
   4173 			reg = CSR_READ(sc, WMREG_RFCTL);
   4174 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4175 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4176 			break;
   4177 		default:
   4178 			break;
   4179 		}
   4180 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4181 
   4182 		switch (sc->sc_type) {
   4183 		/*
   4184 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4185 		 * Avoid RSS Hash Value bug.
   4186 		 */
   4187 		case WM_T_82571:
   4188 		case WM_T_82572:
   4189 		case WM_T_82573:
   4190 		case WM_T_80003:
   4191 		case WM_T_ICH8:
   4192 			reg = CSR_READ(sc, WMREG_RFCTL);
    4193 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4194 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4195 			break;
   4196 		case WM_T_82574:
    4197 			/* Use extended Rx descriptors. */
   4198 			reg = CSR_READ(sc, WMREG_RFCTL);
   4199 			reg |= WMREG_RFCTL_EXSTEN;
   4200 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4201 			break;
   4202 		default:
   4203 			break;
   4204 		}
   4205 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4206 		/*
   4207 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4208 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4209 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4210 		 * Correctly by the Device"
   4211 		 *
   4212 		 * I354(C2000) Errata AVR53:
   4213 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4214 		 * Hang"
   4215 		 */
   4216 		reg = CSR_READ(sc, WMREG_RFCTL);
   4217 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4218 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4219 	}
   4220 }
   4221 
   4222 static uint32_t
   4223 wm_rxpbs_adjust_82580(uint32_t val)
   4224 {
   4225 	uint32_t rv = 0;
   4226 
   4227 	if (val < __arraycount(wm_82580_rxpbs_table))
   4228 		rv = wm_82580_rxpbs_table[val];
   4229 
   4230 	return rv;
   4231 }
   4232 
   4233 /*
   4234  * wm_reset_phy:
   4235  *
   4236  *	generic PHY reset function.
   4237  *	Same as e1000_phy_hw_reset_generic()
   4238  */
   4239 static void
   4240 wm_reset_phy(struct wm_softc *sc)
   4241 {
   4242 	uint32_t reg;
   4243 
   4244 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4245 		device_xname(sc->sc_dev), __func__));
   4246 	if (wm_phy_resetisblocked(sc))
   4247 		return;
   4248 
   4249 	sc->phy.acquire(sc);
   4250 
   4251 	reg = CSR_READ(sc, WMREG_CTRL);
   4252 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4253 	CSR_WRITE_FLUSH(sc);
   4254 
   4255 	delay(sc->phy.reset_delay_us);
   4256 
   4257 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4258 	CSR_WRITE_FLUSH(sc);
   4259 
   4260 	delay(150);
   4261 
   4262 	sc->phy.release(sc);
   4263 
   4264 	wm_get_cfg_done(sc);
   4265 	wm_phy_post_reset(sc);
   4266 }
   4267 
   4268 /*
   4269  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4270  * so it is enough to check sc->sc_queue[0] only.
   4271  */
   4272 static void
   4273 wm_flush_desc_rings(struct wm_softc *sc)
   4274 {
   4275 	pcireg_t preg;
   4276 	uint32_t reg;
   4277 	struct wm_txqueue *txq;
   4278 	wiseman_txdesc_t *txd;
   4279 	int nexttx;
   4280 	uint32_t rctl;
   4281 
   4282 	/* First, disable MULR fix in FEXTNVM11 */
   4283 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4284 	reg |= FEXTNVM11_DIS_MULRFIX;
   4285 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4286 
   4287 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4288 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4289 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4290 		return;
   4291 
   4292 	/* TX */
   4293 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4294 	    device_xname(sc->sc_dev), preg, reg);
   4295 	reg = CSR_READ(sc, WMREG_TCTL);
   4296 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4297 
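         	/*
         	 * Queue a single 512-byte dummy descriptor with IFCS set and
         	 * advance the tail pointer so the hardware drains the ring.
         	 */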
   4298 	txq = &sc->sc_queue[0].wmq_txq;
   4299 	nexttx = txq->txq_next;
   4300 	txd = &txq->txq_descs[nexttx];
   4301 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4302 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4303 	txd->wtx_fields.wtxu_status = 0;
   4304 	txd->wtx_fields.wtxu_options = 0;
   4305 	txd->wtx_fields.wtxu_vlan = 0;
   4306 
   4307 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4308 	    BUS_SPACE_BARRIER_WRITE);
   4309 
   4310 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4311 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4312 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4313 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4314 	delay(250);
   4315 
   4316 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4317 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4318 		return;
   4319 
   4320 	/* RX */
   4321 	printf("%s: Need RX flush (reg = %08x)\n",
   4322 	    device_xname(sc->sc_dev), preg);
   4323 	rctl = CSR_READ(sc, WMREG_RCTL);
   4324 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4325 	CSR_WRITE_FLUSH(sc);
   4326 	delay(150);
   4327 
   4328 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4329 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4330 	reg &= 0xffffc000;
   4331 	/*
   4332 	 * update thresholds: prefetch threshold to 31, host threshold
   4333 	 * to 1 and make sure the granularity is "descriptors" and not
   4334 	 * "cache lines"
   4335 	 */
   4336 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4337 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4338 
   4339 	/*
   4340 	 * momentarily enable the RX ring for the changes to take
   4341 	 * effect
   4342 	 */
   4343 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4344 	CSR_WRITE_FLUSH(sc);
   4345 	delay(150);
   4346 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4347 }
   4348 
   4349 /*
   4350  * wm_reset:
   4351  *
   4352  *	Reset the i82542 chip.
   4353  */
   4354 static void
   4355 wm_reset(struct wm_softc *sc)
   4356 {
   4357 	int phy_reset = 0;
   4358 	int i, error = 0;
   4359 	uint32_t reg;
   4360 	uint16_t kmreg;
   4361 	int rv;
   4362 
   4363 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4364 		device_xname(sc->sc_dev), __func__));
   4365 	KASSERT(sc->sc_type != 0);
   4366 
   4367 	/*
   4368 	 * Allocate on-chip memory according to the MTU size.
   4369 	 * The Packet Buffer Allocation register must be written
   4370 	 * before the chip is reset.
   4371 	 */
   4372 	switch (sc->sc_type) {
   4373 	case WM_T_82547:
   4374 	case WM_T_82547_2:
   4375 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4376 		    PBA_22K : PBA_30K;
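         		/*
         		 * The remainder of the 40KB packet buffer serves as the
         		 * Tx FIFO for the 82547 Tx stall workaround.
         		 */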
   4377 		for (i = 0; i < sc->sc_nqueues; i++) {
   4378 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4379 			txq->txq_fifo_head = 0;
   4380 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4381 			txq->txq_fifo_size =
   4382 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4383 			txq->txq_fifo_stall = 0;
   4384 		}
   4385 		break;
   4386 	case WM_T_82571:
   4387 	case WM_T_82572:
    4388 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4389 	case WM_T_80003:
   4390 		sc->sc_pba = PBA_32K;
   4391 		break;
   4392 	case WM_T_82573:
   4393 		sc->sc_pba = PBA_12K;
   4394 		break;
   4395 	case WM_T_82574:
   4396 	case WM_T_82583:
   4397 		sc->sc_pba = PBA_20K;
   4398 		break;
   4399 	case WM_T_82576:
   4400 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4401 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4402 		break;
   4403 	case WM_T_82580:
   4404 	case WM_T_I350:
   4405 	case WM_T_I354:
   4406 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4407 		break;
   4408 	case WM_T_I210:
   4409 	case WM_T_I211:
   4410 		sc->sc_pba = PBA_34K;
   4411 		break;
   4412 	case WM_T_ICH8:
   4413 		/* Workaround for a bit corruption issue in FIFO memory */
   4414 		sc->sc_pba = PBA_8K;
   4415 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4416 		break;
   4417 	case WM_T_ICH9:
   4418 	case WM_T_ICH10:
   4419 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4420 		    PBA_14K : PBA_10K;
   4421 		break;
   4422 	case WM_T_PCH:
   4423 	case WM_T_PCH2:
   4424 	case WM_T_PCH_LPT:
   4425 	case WM_T_PCH_SPT:
   4426 		sc->sc_pba = PBA_26K;
   4427 		break;
   4428 	default:
   4429 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4430 		    PBA_40K : PBA_48K;
   4431 		break;
   4432 	}
   4433 	/*
   4434 	 * Only old or non-multiqueue devices have the PBA register
   4435 	 * XXX Need special handling for 82575.
   4436 	 */
   4437 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4438 	    || (sc->sc_type == WM_T_82575))
   4439 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4440 
   4441 	/* Prevent the PCI-E bus from sticking */
   4442 	if (sc->sc_flags & WM_F_PCIE) {
   4443 		int timeout = 800;
   4444 
   4445 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4446 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4447 
   4448 		while (timeout--) {
   4449 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4450 			    == 0)
   4451 				break;
   4452 			delay(100);
   4453 		}
   4454 		if (timeout == 0)
   4455 			device_printf(sc->sc_dev,
   4456 			    "failed to disable busmastering\n");
   4457 	}
   4458 
   4459 	/* Set the completion timeout for interface */
   4460 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4461 	    || (sc->sc_type == WM_T_82580)
   4462 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4463 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4464 		wm_set_pcie_completion_timeout(sc);
   4465 
   4466 	/* Clear interrupt */
   4467 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4468 	if (wm_is_using_msix(sc)) {
   4469 		if (sc->sc_type != WM_T_82574) {
   4470 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4471 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4472 		} else {
   4473 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4474 		}
   4475 	}
   4476 
   4477 	/* Stop the transmit and receive processes. */
   4478 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4479 	sc->sc_rctl &= ~RCTL_EN;
   4480 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4481 	CSR_WRITE_FLUSH(sc);
   4482 
   4483 	/* XXX set_tbi_sbp_82543() */
   4484 
   4485 	delay(10*1000);
   4486 
   4487 	/* Must acquire the MDIO ownership before MAC reset */
   4488 	switch (sc->sc_type) {
   4489 	case WM_T_82573:
   4490 	case WM_T_82574:
   4491 	case WM_T_82583:
   4492 		error = wm_get_hw_semaphore_82573(sc);
   4493 		break;
   4494 	default:
   4495 		break;
   4496 	}
   4497 
   4498 	/*
   4499 	 * 82541 Errata 29? & 82547 Errata 28?
    4500 	 * See also the description of the PHY_RST bit in the CTRL register
   4501 	 * in 8254x_GBe_SDM.pdf.
   4502 	 */
   4503 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4504 		CSR_WRITE(sc, WMREG_CTRL,
   4505 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4506 		CSR_WRITE_FLUSH(sc);
   4507 		delay(5000);
   4508 	}
   4509 
   4510 	switch (sc->sc_type) {
   4511 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4512 	case WM_T_82541:
   4513 	case WM_T_82541_2:
   4514 	case WM_T_82547:
   4515 	case WM_T_82547_2:
   4516 		/*
   4517 		 * On some chipsets, a reset through a memory-mapped write
   4518 		 * cycle can cause the chip to reset before completing the
    4519 		 * write cycle.  This causes a major headache that can be
   4520 		 * avoided by issuing the reset via indirect register writes
   4521 		 * through I/O space.
   4522 		 *
   4523 		 * So, if we successfully mapped the I/O BAR at attach time,
   4524 		 * use that.  Otherwise, try our luck with a memory-mapped
   4525 		 * reset.
   4526 		 */
   4527 		if (sc->sc_flags & WM_F_IOH_VALID)
   4528 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4529 		else
   4530 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4531 		break;
   4532 	case WM_T_82545_3:
   4533 	case WM_T_82546_3:
   4534 		/* Use the shadow control register on these chips. */
   4535 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4536 		break;
   4537 	case WM_T_80003:
   4538 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4539 		sc->phy.acquire(sc);
   4540 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4541 		sc->phy.release(sc);
   4542 		break;
   4543 	case WM_T_ICH8:
   4544 	case WM_T_ICH9:
   4545 	case WM_T_ICH10:
   4546 	case WM_T_PCH:
   4547 	case WM_T_PCH2:
   4548 	case WM_T_PCH_LPT:
   4549 	case WM_T_PCH_SPT:
   4550 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4551 		if (wm_phy_resetisblocked(sc) == false) {
   4552 			/*
   4553 			 * Gate automatic PHY configuration by hardware on
   4554 			 * non-managed 82579
   4555 			 */
   4556 			if ((sc->sc_type == WM_T_PCH2)
   4557 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4558 				== 0))
   4559 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4560 
   4561 			reg |= CTRL_PHY_RESET;
   4562 			phy_reset = 1;
   4563 		} else
    4564 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4565 		sc->phy.acquire(sc);
   4566 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4567 		/* Don't insert a completion barrier during reset */
   4568 		delay(20*1000);
   4569 		mutex_exit(sc->sc_ich_phymtx);
   4570 		break;
   4571 	case WM_T_82580:
   4572 	case WM_T_I350:
   4573 	case WM_T_I354:
   4574 	case WM_T_I210:
   4575 	case WM_T_I211:
   4576 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4577 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4578 			CSR_WRITE_FLUSH(sc);
   4579 		delay(5000);
   4580 		break;
   4581 	case WM_T_82542_2_0:
   4582 	case WM_T_82542_2_1:
   4583 	case WM_T_82543:
   4584 	case WM_T_82540:
   4585 	case WM_T_82545:
   4586 	case WM_T_82546:
   4587 	case WM_T_82571:
   4588 	case WM_T_82572:
   4589 	case WM_T_82573:
   4590 	case WM_T_82574:
   4591 	case WM_T_82575:
   4592 	case WM_T_82576:
   4593 	case WM_T_82583:
   4594 	default:
   4595 		/* Everything else can safely use the documented method. */
   4596 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4597 		break;
   4598 	}
   4599 
   4600 	/* Must release the MDIO ownership after MAC reset */
   4601 	switch (sc->sc_type) {
   4602 	case WM_T_82573:
   4603 	case WM_T_82574:
   4604 	case WM_T_82583:
   4605 		if (error == 0)
   4606 			wm_put_hw_semaphore_82573(sc);
   4607 		break;
   4608 	default:
   4609 		break;
   4610 	}
   4611 
   4612 	if (phy_reset != 0)
   4613 		wm_get_cfg_done(sc);
   4614 
   4615 	/* reload EEPROM */
   4616 	switch (sc->sc_type) {
   4617 	case WM_T_82542_2_0:
   4618 	case WM_T_82542_2_1:
   4619 	case WM_T_82543:
   4620 	case WM_T_82544:
   4621 		delay(10);
   4622 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4623 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4624 		CSR_WRITE_FLUSH(sc);
   4625 		delay(2000);
   4626 		break;
   4627 	case WM_T_82540:
   4628 	case WM_T_82545:
   4629 	case WM_T_82545_3:
   4630 	case WM_T_82546:
   4631 	case WM_T_82546_3:
   4632 		delay(5*1000);
   4633 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4634 		break;
   4635 	case WM_T_82541:
   4636 	case WM_T_82541_2:
   4637 	case WM_T_82547:
   4638 	case WM_T_82547_2:
   4639 		delay(20000);
   4640 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4641 		break;
   4642 	case WM_T_82571:
   4643 	case WM_T_82572:
   4644 	case WM_T_82573:
   4645 	case WM_T_82574:
   4646 	case WM_T_82583:
   4647 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4648 			delay(10);
   4649 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4650 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4651 			CSR_WRITE_FLUSH(sc);
   4652 		}
   4653 		/* check EECD_EE_AUTORD */
   4654 		wm_get_auto_rd_done(sc);
   4655 		/*
   4656 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4657 		 * is set.
   4658 		 */
   4659 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4660 		    || (sc->sc_type == WM_T_82583))
   4661 			delay(25*1000);
   4662 		break;
   4663 	case WM_T_82575:
   4664 	case WM_T_82576:
   4665 	case WM_T_82580:
   4666 	case WM_T_I350:
   4667 	case WM_T_I354:
   4668 	case WM_T_I210:
   4669 	case WM_T_I211:
   4670 	case WM_T_80003:
   4671 		/* check EECD_EE_AUTORD */
   4672 		wm_get_auto_rd_done(sc);
   4673 		break;
   4674 	case WM_T_ICH8:
   4675 	case WM_T_ICH9:
   4676 	case WM_T_ICH10:
   4677 	case WM_T_PCH:
   4678 	case WM_T_PCH2:
   4679 	case WM_T_PCH_LPT:
   4680 	case WM_T_PCH_SPT:
   4681 		break;
   4682 	default:
   4683 		panic("%s: unknown type\n", __func__);
   4684 	}
   4685 
   4686 	/* Check whether EEPROM is present or not */
   4687 	switch (sc->sc_type) {
   4688 	case WM_T_82575:
   4689 	case WM_T_82576:
   4690 	case WM_T_82580:
   4691 	case WM_T_I350:
   4692 	case WM_T_I354:
   4693 	case WM_T_ICH8:
   4694 	case WM_T_ICH9:
   4695 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4696 			/* Not found */
   4697 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4698 			if (sc->sc_type == WM_T_82575)
   4699 				wm_reset_init_script_82575(sc);
   4700 		}
   4701 		break;
   4702 	default:
   4703 		break;
   4704 	}
   4705 
   4706 	if (phy_reset != 0)
   4707 		wm_phy_post_reset(sc);
   4708 
   4709 	if ((sc->sc_type == WM_T_82580)
   4710 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4711 		/* clear global device reset status bit */
   4712 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4713 	}
   4714 
   4715 	/* Clear any pending interrupt events. */
   4716 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4717 	reg = CSR_READ(sc, WMREG_ICR);
   4718 	if (wm_is_using_msix(sc)) {
   4719 		if (sc->sc_type != WM_T_82574) {
   4720 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4721 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4722 		} else
   4723 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4724 	}
   4725 
   4726 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4727 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4728 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4729 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4730 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4731 		reg |= KABGTXD_BGSQLBIAS;
   4732 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4733 	}
   4734 
   4735 	/* reload sc_ctrl */
   4736 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4737 
   4738 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4739 		wm_set_eee_i350(sc);
   4740 
   4741 	/*
   4742 	 * For PCH, this write will make sure that any noise will be detected
   4743 	 * as a CRC error and be dropped rather than show up as a bad packet
   4744 	 * to the DMA engine
   4745 	 */
   4746 	if (sc->sc_type == WM_T_PCH)
   4747 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4748 
   4749 	if (sc->sc_type >= WM_T_82544)
   4750 		CSR_WRITE(sc, WMREG_WUC, 0);
   4751 
   4752 	wm_reset_mdicnfg_82580(sc);
   4753 
   4754 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4755 		wm_pll_workaround_i210(sc);
   4756 
   4757 	if (sc->sc_type == WM_T_80003) {
   4758 		/* default to TRUE to enable the MDIC W/A */
   4759 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4760 
   4761 		rv = wm_kmrn_readreg(sc,
   4762 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4763 		if (rv == 0) {
   4764 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4765 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4766 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4767 			else
   4768 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4769 		}
   4770 	}
   4771 }
   4772 
   4773 /*
   4774  * wm_add_rxbuf:
   4775  *
    4776  *	Add a receive buffer to the indicated descriptor.
   4777  */
   4778 static int
   4779 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4780 {
   4781 	struct wm_softc *sc = rxq->rxq_sc;
   4782 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4783 	struct mbuf *m;
   4784 	int error;
   4785 
   4786 	KASSERT(mutex_owned(rxq->rxq_lock));
   4787 
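	/* Allocate a packet header mbuf and attach a cluster to hold the frame. */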
   4788 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4789 	if (m == NULL)
   4790 		return ENOBUFS;
   4791 
   4792 	MCLGET(m, M_DONTWAIT);
   4793 	if ((m->m_flags & M_EXT) == 0) {
   4794 		m_freem(m);
   4795 		return ENOBUFS;
   4796 	}
   4797 
   4798 	if (rxs->rxs_mbuf != NULL)
   4799 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4800 
   4801 	rxs->rxs_mbuf = m;
   4802 
   4803 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4804 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4805 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4806 	if (error) {
   4807 		/* XXX XXX XXX */
   4808 		aprint_error_dev(sc->sc_dev,
   4809 		    "unable to load rx DMA map %d, error = %d\n",
   4810 		    idx, error);
   4811 		panic("wm_add_rxbuf");
   4812 	}
   4813 
   4814 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4815 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4816 
   4817 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4818 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4819 			wm_init_rxdesc(rxq, idx);
   4820 	} else
   4821 		wm_init_rxdesc(rxq, idx);
   4822 
   4823 	return 0;
   4824 }
   4825 
   4826 /*
   4827  * wm_rxdrain:
   4828  *
   4829  *	Drain the receive queue.
   4830  */
   4831 static void
   4832 wm_rxdrain(struct wm_rxqueue *rxq)
   4833 {
   4834 	struct wm_softc *sc = rxq->rxq_sc;
   4835 	struct wm_rxsoft *rxs;
   4836 	int i;
   4837 
   4838 	KASSERT(mutex_owned(rxq->rxq_lock));
   4839 
   4840 	for (i = 0; i < WM_NRXDESC; i++) {
   4841 		rxs = &rxq->rxq_soft[i];
   4842 		if (rxs->rxs_mbuf != NULL) {
   4843 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4844 			m_freem(rxs->rxs_mbuf);
   4845 			rxs->rxs_mbuf = NULL;
   4846 		}
   4847 	}
   4848 }
   4849 
   4850 /*
    4851  * Set up the registers for RSS.
    4852  *
    4853  * XXX VMDq is not yet supported.
   4854  */
   4855 static void
   4856 wm_init_rss(struct wm_softc *sc)
   4857 {
   4858 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4859 	int i;
   4860 
   4861 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4862 
   4863 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4864 		int qid, reta_ent;
   4865 
   4866 		qid  = i % sc->sc_nqueues;
    4867 		switch (sc->sc_type) {
   4868 		case WM_T_82574:
   4869 			reta_ent = __SHIFTIN(qid,
   4870 			    RETA_ENT_QINDEX_MASK_82574);
   4871 			break;
   4872 		case WM_T_82575:
   4873 			reta_ent = __SHIFTIN(qid,
   4874 			    RETA_ENT_QINDEX1_MASK_82575);
   4875 			break;
   4876 		default:
   4877 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4878 			break;
   4879 		}
   4880 
   4881 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4882 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4883 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4884 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4885 	}
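
	/*
	 * For illustration: with sc_nqueues == 4, the loop above fills the
	 * RETA with the repeating queue pattern 0,1,2,3,0,1,2,3,..., so RSS
	 * hash values spread incoming flows evenly across the queues.
	 */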
   4886 
   4887 	rss_getkey((uint8_t *)rss_key);
   4888 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4889 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4890 
   4891 	if (sc->sc_type == WM_T_82574)
   4892 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4893 	else
   4894 		mrqc = MRQC_ENABLE_RSS_MQ;
   4895 
   4896 	/*
   4897 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4898 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4899 	 */
   4900 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4901 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4902 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4903 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4904 
   4905 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4906 }
   4907 
   4908 /*
    4909  * Adjust the TX and RX queue numbers which the system actually uses.
    4910  *
    4911  * The numbers are affected by the following parameters:
    4912  *     - The number of hardware queues
   4913  *     - The number of MSI-X vectors (= "nvectors" argument)
   4914  *     - ncpu
   4915  */
   4916 static void
   4917 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4918 {
   4919 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4920 
   4921 	if (nvectors < 2) {
   4922 		sc->sc_nqueues = 1;
   4923 		return;
   4924 	}
   4925 
    4926 	switch (sc->sc_type) {
   4927 	case WM_T_82572:
   4928 		hw_ntxqueues = 2;
   4929 		hw_nrxqueues = 2;
   4930 		break;
   4931 	case WM_T_82574:
   4932 		hw_ntxqueues = 2;
   4933 		hw_nrxqueues = 2;
   4934 		break;
   4935 	case WM_T_82575:
   4936 		hw_ntxqueues = 4;
   4937 		hw_nrxqueues = 4;
   4938 		break;
   4939 	case WM_T_82576:
   4940 		hw_ntxqueues = 16;
   4941 		hw_nrxqueues = 16;
   4942 		break;
   4943 	case WM_T_82580:
   4944 	case WM_T_I350:
   4945 	case WM_T_I354:
   4946 		hw_ntxqueues = 8;
   4947 		hw_nrxqueues = 8;
   4948 		break;
   4949 	case WM_T_I210:
   4950 		hw_ntxqueues = 4;
   4951 		hw_nrxqueues = 4;
   4952 		break;
   4953 	case WM_T_I211:
   4954 		hw_ntxqueues = 2;
   4955 		hw_nrxqueues = 2;
   4956 		break;
   4957 		/*
    4958 		 * As the Ethernet controllers below do not support MSI-X,
    4959 		 * this driver does not use multiqueue on them.
   4960 		 *     - WM_T_80003
   4961 		 *     - WM_T_ICH8
   4962 		 *     - WM_T_ICH9
   4963 		 *     - WM_T_ICH10
   4964 		 *     - WM_T_PCH
   4965 		 *     - WM_T_PCH2
   4966 		 *     - WM_T_PCH_LPT
   4967 		 */
   4968 	default:
   4969 		hw_ntxqueues = 1;
   4970 		hw_nrxqueues = 1;
   4971 		break;
   4972 	}
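
	/*
	 * A worked example with hypothetical numbers: on an 82576
	 * (hw_ntxqueues = hw_nrxqueues = 16) with nvectors = 5 and ncpu = 8,
	 * the checks below yield sc_nqueues = 5 - 1 = 4, because one MSI-X
	 * vector is reserved for the link interrupt.
	 */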
   4973 
   4974 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4975 
   4976 	/*
    4977 	 * As using more queues than MSI-X vectors cannot improve scaling,
    4978 	 * we limit the number of queues actually used.
   4979 	 */
   4980 	if (nvectors < hw_nqueues + 1) {
   4981 		sc->sc_nqueues = nvectors - 1;
   4982 	} else {
   4983 		sc->sc_nqueues = hw_nqueues;
   4984 	}
   4985 
   4986 	/*
    4987 	 * As using more queues than CPUs cannot improve scaling, we limit
    4988 	 * the number of queues actually used.
   4989 	 */
   4990 	if (ncpu < sc->sc_nqueues)
   4991 		sc->sc_nqueues = ncpu;
   4992 }
   4993 
   4994 static inline bool
   4995 wm_is_using_msix(struct wm_softc *sc)
   4996 {
   4997 
   4998 	return (sc->sc_nintrs > 1);
   4999 }
   5000 
   5001 static inline bool
   5002 wm_is_using_multiqueue(struct wm_softc *sc)
   5003 {
   5004 
   5005 	return (sc->sc_nqueues > 1);
   5006 }
   5007 
   5008 static int
   5009 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5010 {
   5011 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5012 	wmq->wmq_id = qidx;
   5013 	wmq->wmq_intr_idx = intr_idx;
   5014 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5015 #ifdef WM_MPSAFE
   5016 	    | SOFTINT_MPSAFE
   5017 #endif
   5018 	    , wm_handle_queue, wmq);
   5019 	if (wmq->wmq_si != NULL)
   5020 		return 0;
   5021 
   5022 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5023 	    wmq->wmq_id);
   5024 
   5025 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5026 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5027 	return ENOMEM;
   5028 }
   5029 
   5030 /*
   5031  * Both single interrupt MSI and INTx can use this function.
   5032  */
   5033 static int
   5034 wm_setup_legacy(struct wm_softc *sc)
   5035 {
   5036 	pci_chipset_tag_t pc = sc->sc_pc;
   5037 	const char *intrstr = NULL;
   5038 	char intrbuf[PCI_INTRSTR_LEN];
   5039 	int error;
   5040 
   5041 	error = wm_alloc_txrx_queues(sc);
   5042 	if (error) {
   5043 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5044 		    error);
   5045 		return ENOMEM;
   5046 	}
   5047 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5048 	    sizeof(intrbuf));
   5049 #ifdef WM_MPSAFE
   5050 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5051 #endif
   5052 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5053 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5054 	if (sc->sc_ihs[0] == NULL) {
   5055 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5056 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5057 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5058 		return ENOMEM;
   5059 	}
   5060 
   5061 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5062 	sc->sc_nintrs = 1;
   5063 
   5064 	return wm_softint_establish(sc, 0, 0);
   5065 }
   5066 
   5067 static int
   5068 wm_setup_msix(struct wm_softc *sc)
   5069 {
   5070 	void *vih;
   5071 	kcpuset_t *affinity;
   5072 	int qidx, error, intr_idx, txrx_established;
   5073 	pci_chipset_tag_t pc = sc->sc_pc;
   5074 	const char *intrstr = NULL;
   5075 	char intrbuf[PCI_INTRSTR_LEN];
   5076 	char intr_xname[INTRDEVNAMEBUF];
   5077 
   5078 	if (sc->sc_nqueues < ncpu) {
   5079 		/*
   5080 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5081 		 * interrupts starts from CPU#1.
   5082 		 */
   5083 		sc->sc_affinity_offset = 1;
   5084 	} else {
   5085 		/*
    5086 		 * In this case, this device uses all CPUs, so we unify the
    5087 		 * affinity cpu_index with the MSI-X vector number for readability.
   5088 		 */
   5089 		sc->sc_affinity_offset = 0;
   5090 	}
   5091 
   5092 	error = wm_alloc_txrx_queues(sc);
   5093 	if (error) {
   5094 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5095 		    error);
   5096 		return ENOMEM;
   5097 	}
   5098 
   5099 	kcpuset_create(&affinity, false);
   5100 	intr_idx = 0;
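
	/*
	 * Vector layout used below: vectors 0 .. sc_nqueues - 1 handle
	 * TX/RX, one per queue, and vector sc_nqueues handles link status
	 * changes.
	 */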
   5101 
   5102 	/*
   5103 	 * TX and RX
   5104 	 */
   5105 	txrx_established = 0;
   5106 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5107 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5108 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5109 
   5110 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5111 		    sizeof(intrbuf));
   5112 #ifdef WM_MPSAFE
   5113 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5114 		    PCI_INTR_MPSAFE, true);
   5115 #endif
   5116 		memset(intr_xname, 0, sizeof(intr_xname));
   5117 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5118 		    device_xname(sc->sc_dev), qidx);
   5119 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5120 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5121 		if (vih == NULL) {
   5122 			aprint_error_dev(sc->sc_dev,
   5123 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5124 			    intrstr ? " at " : "",
   5125 			    intrstr ? intrstr : "");
   5126 
   5127 			goto fail;
   5128 		}
   5129 		kcpuset_zero(affinity);
   5130 		/* Round-robin affinity */
   5131 		kcpuset_set(affinity, affinity_to);
   5132 		error = interrupt_distribute(vih, affinity, NULL);
   5133 		if (error == 0) {
   5134 			aprint_normal_dev(sc->sc_dev,
   5135 			    "for TX and RX interrupting at %s affinity to %u\n",
   5136 			    intrstr, affinity_to);
   5137 		} else {
   5138 			aprint_normal_dev(sc->sc_dev,
   5139 			    "for TX and RX interrupting at %s\n", intrstr);
   5140 		}
   5141 		sc->sc_ihs[intr_idx] = vih;
   5142 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5143 			goto fail;
   5144 		txrx_established++;
   5145 		intr_idx++;
   5146 	}
   5147 
   5148 	/*
   5149 	 * LINK
   5150 	 */
   5151 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5152 	    sizeof(intrbuf));
   5153 #ifdef WM_MPSAFE
   5154 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5155 #endif
   5156 	memset(intr_xname, 0, sizeof(intr_xname));
   5157 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5158 	    device_xname(sc->sc_dev));
   5159 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5160 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5161 	if (vih == NULL) {
   5162 		aprint_error_dev(sc->sc_dev,
   5163 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5164 		    intrstr ? " at " : "",
   5165 		    intrstr ? intrstr : "");
   5166 
   5167 		goto fail;
   5168 	}
    5169 	/* Keep the default affinity for the LINK interrupt */
   5170 	aprint_normal_dev(sc->sc_dev,
   5171 	    "for LINK interrupting at %s\n", intrstr);
   5172 	sc->sc_ihs[intr_idx] = vih;
   5173 	sc->sc_link_intr_idx = intr_idx;
   5174 
   5175 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5176 	kcpuset_destroy(affinity);
   5177 	return 0;
   5178 
   5179  fail:
   5180 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5181 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5182 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5183 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5184 	}
   5185 
   5186 	kcpuset_destroy(affinity);
   5187 	return ENOMEM;
   5188 }
   5189 
   5190 static void
   5191 wm_unset_stopping_flags(struct wm_softc *sc)
   5192 {
   5193 	int i;
   5194 
   5195 	KASSERT(WM_CORE_LOCKED(sc));
   5196 
   5197 	/*
   5198 	 * must unset stopping flags in ascending order.
   5199 	 */
    5200 	for (i = 0; i < sc->sc_nqueues; i++) {
   5201 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5202 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5203 
   5204 		mutex_enter(txq->txq_lock);
   5205 		txq->txq_stopping = false;
   5206 		mutex_exit(txq->txq_lock);
   5207 
   5208 		mutex_enter(rxq->rxq_lock);
   5209 		rxq->rxq_stopping = false;
   5210 		mutex_exit(rxq->rxq_lock);
   5211 	}
   5212 
   5213 	sc->sc_core_stopping = false;
   5214 }
   5215 
   5216 static void
   5217 wm_set_stopping_flags(struct wm_softc *sc)
   5218 {
   5219 	int i;
   5220 
   5221 	KASSERT(WM_CORE_LOCKED(sc));
   5222 
   5223 	sc->sc_core_stopping = true;
   5224 
   5225 	/*
   5226 	 * must set stopping flags in ascending order.
   5227 	 */
    5228 	for (i = 0; i < sc->sc_nqueues; i++) {
   5229 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5230 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5231 
   5232 		mutex_enter(rxq->rxq_lock);
   5233 		rxq->rxq_stopping = true;
   5234 		mutex_exit(rxq->rxq_lock);
   5235 
   5236 		mutex_enter(txq->txq_lock);
   5237 		txq->txq_stopping = true;
   5238 		mutex_exit(txq->txq_lock);
   5239 	}
   5240 }
   5241 
   5242 /*
    5243  * Write the interrupt interval value to the ITR or EITR register.
   5244  */
   5245 static void
   5246 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5247 {
   5248 
   5249 	if (!wmq->wmq_set_itr)
   5250 		return;
   5251 
   5252 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5253 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5254 
   5255 		/*
    5256 		 * The 82575 doesn't have the CNT_INGR field, so
    5257 		 * overwrite the counter field in software.
   5258 		 */
   5259 		if (sc->sc_type == WM_T_82575)
   5260 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5261 		else
   5262 			eitr |= EITR_CNT_INGR;
   5263 
   5264 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5265 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5266 		/*
    5267 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5268 		 * the multiqueue function with MSI-X.
   5269 		 */
   5270 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5271 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5272 	} else {
   5273 		KASSERT(wmq->wmq_id == 0);
   5274 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5275 	}
   5276 
   5277 	wmq->wmq_set_itr = false;
   5278 }
   5279 
   5280 /*
   5281  * TODO
    5282  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5283  * however, it does not fit wm(4) well, so AIM stays disabled until we
    5284  * find an appropriate ITR calculation.
   5285  */
   5286 /*
    5287  * Calculate the interrupt interval value to be written to the register
    5288  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5289  */
   5290 static void
   5291 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5292 {
   5293 #ifdef NOTYET
   5294 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5295 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5296 	uint32_t avg_size = 0;
   5297 	uint32_t new_itr;
   5298 
   5299 	if (rxq->rxq_packets)
   5300 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5301 	if (txq->txq_packets)
   5302 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5303 
   5304 	if (avg_size == 0) {
   5305 		new_itr = 450; /* restore default value */
   5306 		goto out;
   5307 	}
   5308 
   5309 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5310 	avg_size += 24;
   5311 
   5312 	/* Don't starve jumbo frames */
   5313 	avg_size = min(avg_size, 3000);
   5314 
   5315 	/* Give a little boost to mid-size frames */
   5316 	if ((avg_size > 300) && (avg_size < 1200))
   5317 		new_itr = avg_size / 3;
   5318 	else
   5319 		new_itr = avg_size / 2;
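
	/*
	 * Example: an average frame of 600 bytes becomes avg_size = 624
	 * after the overhead adjustment above, falls in the mid-size range,
	 * and yields new_itr = 624 / 3 = 208.
	 */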
   5320 
   5321 out:
   5322 	/*
    5323 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5324 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5325 	 */
   5326 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5327 		new_itr *= 4;
   5328 
   5329 	if (new_itr != wmq->wmq_itr) {
   5330 		wmq->wmq_itr = new_itr;
   5331 		wmq->wmq_set_itr = true;
   5332 	} else
   5333 		wmq->wmq_set_itr = false;
   5334 
   5335 	rxq->rxq_packets = 0;
   5336 	rxq->rxq_bytes = 0;
   5337 	txq->txq_packets = 0;
   5338 	txq->txq_bytes = 0;
   5339 #endif
   5340 }
   5341 
   5342 /*
   5343  * wm_init:		[ifnet interface function]
   5344  *
   5345  *	Initialize the interface.
   5346  */
   5347 static int
   5348 wm_init(struct ifnet *ifp)
   5349 {
   5350 	struct wm_softc *sc = ifp->if_softc;
   5351 	int ret;
   5352 
   5353 	WM_CORE_LOCK(sc);
   5354 	ret = wm_init_locked(ifp);
   5355 	WM_CORE_UNLOCK(sc);
   5356 
   5357 	return ret;
   5358 }
   5359 
   5360 static int
   5361 wm_init_locked(struct ifnet *ifp)
   5362 {
   5363 	struct wm_softc *sc = ifp->if_softc;
   5364 	int i, j, trynum, error = 0;
   5365 	uint32_t reg;
   5366 
   5367 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5368 		device_xname(sc->sc_dev), __func__));
   5369 	KASSERT(WM_CORE_LOCKED(sc));
   5370 
   5371 	/*
    5372 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5373 	 * There is a small but measurable benefit to avoiding the adjustment
   5374 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5375 	 * on such platforms.  One possibility is that the DMA itself is
   5376 	 * slightly more efficient if the front of the entire packet (instead
   5377 	 * of the front of the headers) is aligned.
   5378 	 *
   5379 	 * Note we must always set align_tweak to 0 if we are using
   5380 	 * jumbo frames.
   5381 	 */
   5382 #ifdef __NO_STRICT_ALIGNMENT
   5383 	sc->sc_align_tweak = 0;
   5384 #else
   5385 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5386 		sc->sc_align_tweak = 0;
   5387 	else
   5388 		sc->sc_align_tweak = 2;
   5389 #endif /* __NO_STRICT_ALIGNMENT */
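
	/*
	 * (The 2-byte tweak shifts the 14-byte Ethernet header so that the
	 * IP header that follows it lands on a 4-byte boundary.)
	 */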
   5390 
   5391 	/* Cancel any pending I/O. */
   5392 	wm_stop_locked(ifp, 0);
   5393 
   5394 	/* update statistics before reset */
   5395 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5396 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5397 
   5398 	/* PCH_SPT hardware workaround */
   5399 	if (sc->sc_type == WM_T_PCH_SPT)
   5400 		wm_flush_desc_rings(sc);
   5401 
   5402 	/* Reset the chip to a known state. */
   5403 	wm_reset(sc);
   5404 
   5405 	/*
   5406 	 * AMT based hardware can now take control from firmware
   5407 	 * Do this after reset.
   5408 	 */
   5409 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5410 		wm_get_hw_control(sc);
   5411 
   5412 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5413 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5414 		wm_legacy_irq_quirk_spt(sc);
   5415 
   5416 	/* Init hardware bits */
   5417 	wm_initialize_hardware_bits(sc);
   5418 
   5419 	/* Reset the PHY. */
   5420 	if (sc->sc_flags & WM_F_HAS_MII)
   5421 		wm_gmii_reset(sc);
   5422 
   5423 	/* Calculate (E)ITR value */
   5424 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5425 		/*
    5426 		 * This sets the EITR for NEWQUEUE controllers (except the 82575).
    5427 		 * The 82575's EITR should be set to the same throttling
    5428 		 * value as the other old controllers' ITR because the
    5429 		 * interrupt/sec calculation is the same: 1,000,000,000 / (N * 256).
    5430 		 *
    5431 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5432 		 *
    5433 		 * For N interrupts/sec, set this value to:
    5434 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5435 		 */
   5436 		sc->sc_itr_init = 450;
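		/* That is, at most 1,000,000 / 450 ~= 2222 interrupts/sec. */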
   5437 	} else if (sc->sc_type >= WM_T_82543) {
   5438 		/*
   5439 		 * Set up the interrupt throttling register (units of 256ns)
   5440 		 * Note that a footnote in Intel's documentation says this
   5441 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5442 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5443 		 * that that is also true for the 1024ns units of the other
   5444 		 * interrupt-related timer registers -- so, really, we ought
   5445 		 * to divide this value by 4 when the link speed is low.
   5446 		 *
   5447 		 * XXX implement this division at link speed change!
   5448 		 */
   5449 
   5450 		/*
   5451 		 * For N interrupts/sec, set this value to:
   5452 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5453 		 * absolute and packet timer values to this value
   5454 		 * divided by 4 to get "simple timer" behavior.
   5455 		 */
   5456 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5457 	}
   5458 
   5459 	error = wm_init_txrx_queues(sc);
   5460 	if (error)
   5461 		goto out;
   5462 
   5463 	/*
   5464 	 * Clear out the VLAN table -- we don't use it (yet).
   5465 	 */
   5466 	CSR_WRITE(sc, WMREG_VET, 0);
   5467 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5468 		trynum = 10; /* Due to hw errata */
   5469 	else
   5470 		trynum = 1;
   5471 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5472 		for (j = 0; j < trynum; j++)
   5473 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5474 
   5475 	/*
   5476 	 * Set up flow-control parameters.
   5477 	 *
   5478 	 * XXX Values could probably stand some tuning.
   5479 	 */
   5480 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5481 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5482 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5483 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5484 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5485 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5486 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5487 	}
   5488 
   5489 	sc->sc_fcrtl = FCRTL_DFLT;
   5490 	if (sc->sc_type < WM_T_82543) {
   5491 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5492 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5493 	} else {
   5494 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5495 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5496 	}
   5497 
   5498 	if (sc->sc_type == WM_T_80003)
   5499 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5500 	else
   5501 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5502 
   5503 	/* Writes the control register. */
   5504 	wm_set_vlan(sc);
   5505 
   5506 	if (sc->sc_flags & WM_F_HAS_MII) {
   5507 		uint16_t kmreg;
   5508 
   5509 		switch (sc->sc_type) {
   5510 		case WM_T_80003:
   5511 		case WM_T_ICH8:
   5512 		case WM_T_ICH9:
   5513 		case WM_T_ICH10:
   5514 		case WM_T_PCH:
   5515 		case WM_T_PCH2:
   5516 		case WM_T_PCH_LPT:
   5517 		case WM_T_PCH_SPT:
   5518 			/*
    5519 			 * Set the MAC to wait the maximum time between each
    5520 			 * iteration and increase the max iterations when
    5521 			 * polling the PHY; this fixes erroneous timeouts at
   5522 			 * 10Mbps.
   5523 			 */
   5524 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5525 			    0xFFFF);
   5526 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5527 			    &kmreg);
   5528 			kmreg |= 0x3F;
   5529 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5530 			    kmreg);
   5531 			break;
   5532 		default:
   5533 			break;
   5534 		}
   5535 
   5536 		if (sc->sc_type == WM_T_80003) {
   5537 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5538 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5539 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5540 
   5541 			/* Bypass RX and TX FIFO's */
   5542 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5543 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5544 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5545 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5546 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5547 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5548 		}
   5549 	}
   5550 #if 0
   5551 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5552 #endif
   5553 
   5554 	/* Set up checksum offload parameters. */
   5555 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5556 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5557 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5558 		reg |= RXCSUM_IPOFL;
   5559 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5560 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5561 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5562 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5563 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5564 
   5565 	/* Set registers about MSI-X */
   5566 	if (wm_is_using_msix(sc)) {
   5567 		uint32_t ivar;
   5568 		struct wm_queue *wmq;
   5569 		int qid, qintr_idx;
   5570 
   5571 		if (sc->sc_type == WM_T_82575) {
   5572 			/* Interrupt control */
   5573 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5574 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5575 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5576 
   5577 			/* TX and RX */
   5578 			for (i = 0; i < sc->sc_nqueues; i++) {
   5579 				wmq = &sc->sc_queue[i];
   5580 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5581 				    EITR_TX_QUEUE(wmq->wmq_id)
   5582 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5583 			}
   5584 			/* Link status */
   5585 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5586 			    EITR_OTHER);
   5587 		} else if (sc->sc_type == WM_T_82574) {
   5588 			/* Interrupt control */
   5589 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5590 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5591 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5592 
   5593 			/*
    5594 			 * Work around an issue with spurious interrupts
    5595 			 * in MSI-X mode.
    5596 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5597 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5598 			 */
   5599 			reg = CSR_READ(sc, WMREG_RFCTL);
   5600 			reg |= WMREG_RFCTL_ACKDIS;
   5601 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5602 
   5603 			ivar = 0;
   5604 			/* TX and RX */
   5605 			for (i = 0; i < sc->sc_nqueues; i++) {
   5606 				wmq = &sc->sc_queue[i];
   5607 				qid = wmq->wmq_id;
   5608 				qintr_idx = wmq->wmq_intr_idx;
   5609 
   5610 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5611 				    IVAR_TX_MASK_Q_82574(qid));
   5612 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5613 				    IVAR_RX_MASK_Q_82574(qid));
   5614 			}
   5615 			/* Link status */
   5616 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5617 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5618 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5619 		} else {
   5620 			/* Interrupt control */
   5621 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5622 			    | GPIE_EIAME | GPIE_PBA);
   5623 
   5624 			switch (sc->sc_type) {
   5625 			case WM_T_82580:
   5626 			case WM_T_I350:
   5627 			case WM_T_I354:
   5628 			case WM_T_I210:
   5629 			case WM_T_I211:
   5630 				/* TX and RX */
   5631 				for (i = 0; i < sc->sc_nqueues; i++) {
   5632 					wmq = &sc->sc_queue[i];
   5633 					qid = wmq->wmq_id;
   5634 					qintr_idx = wmq->wmq_intr_idx;
   5635 
   5636 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5637 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5638 					ivar |= __SHIFTIN((qintr_idx
   5639 						| IVAR_VALID),
   5640 					    IVAR_TX_MASK_Q(qid));
   5641 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5642 					ivar |= __SHIFTIN((qintr_idx
   5643 						| IVAR_VALID),
   5644 					    IVAR_RX_MASK_Q(qid));
   5645 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5646 				}
   5647 				break;
   5648 			case WM_T_82576:
   5649 				/* TX and RX */
   5650 				for (i = 0; i < sc->sc_nqueues; i++) {
   5651 					wmq = &sc->sc_queue[i];
   5652 					qid = wmq->wmq_id;
   5653 					qintr_idx = wmq->wmq_intr_idx;
   5654 
   5655 					ivar = CSR_READ(sc,
   5656 					    WMREG_IVAR_Q_82576(qid));
   5657 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5658 					ivar |= __SHIFTIN((qintr_idx
   5659 						| IVAR_VALID),
   5660 					    IVAR_TX_MASK_Q_82576(qid));
   5661 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5662 					ivar |= __SHIFTIN((qintr_idx
   5663 						| IVAR_VALID),
   5664 					    IVAR_RX_MASK_Q_82576(qid));
   5665 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5666 					    ivar);
   5667 				}
   5668 				break;
   5669 			default:
   5670 				break;
   5671 			}
   5672 
   5673 			/* Link status */
   5674 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5675 			    IVAR_MISC_OTHER);
   5676 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5677 		}
   5678 
   5679 		if (wm_is_using_multiqueue(sc)) {
   5680 			wm_init_rss(sc);
   5681 
    5682 			/*
    5683 			 * NOTE: Receive Full-Packet Checksum Offload is
    5684 			 * mutually exclusive with multiqueue. However, this
    5685 			 * is not the same as the TCP/IP checksums, which
    5686 			 * still work.
    5687 			 */
   5688 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5689 			reg |= RXCSUM_PCSD;
   5690 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5691 		}
   5692 	}
   5693 
   5694 	/* Set up the interrupt registers. */
   5695 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5696 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5697 	    ICR_RXO | ICR_RXT0;
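	/*
	 * (TXDW: Tx descriptor written back, LSC: link status change,
	 * RXSEQ: Rx sequence error, RXDMT0: Rx descriptor minimum threshold
	 * reached, RXO: Rx overrun, RXT0: Rx timer.)
	 */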
   5698 	if (wm_is_using_msix(sc)) {
   5699 		uint32_t mask;
   5700 		struct wm_queue *wmq;
   5701 
   5702 		switch (sc->sc_type) {
   5703 		case WM_T_82574:
   5704 			mask = 0;
   5705 			for (i = 0; i < sc->sc_nqueues; i++) {
   5706 				wmq = &sc->sc_queue[i];
   5707 				mask |= ICR_TXQ(wmq->wmq_id);
   5708 				mask |= ICR_RXQ(wmq->wmq_id);
   5709 			}
   5710 			mask |= ICR_OTHER;
   5711 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5712 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5713 			break;
   5714 		default:
   5715 			if (sc->sc_type == WM_T_82575) {
   5716 				mask = 0;
   5717 				for (i = 0; i < sc->sc_nqueues; i++) {
   5718 					wmq = &sc->sc_queue[i];
   5719 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5720 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5721 				}
   5722 				mask |= EITR_OTHER;
   5723 			} else {
   5724 				mask = 0;
   5725 				for (i = 0; i < sc->sc_nqueues; i++) {
   5726 					wmq = &sc->sc_queue[i];
   5727 					mask |= 1 << wmq->wmq_intr_idx;
   5728 				}
   5729 				mask |= 1 << sc->sc_link_intr_idx;
   5730 			}
   5731 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5732 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5733 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5734 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5735 			break;
   5736 		}
   5737 	} else
   5738 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5739 
   5740 	/* Set up the inter-packet gap. */
   5741 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5742 
   5743 	if (sc->sc_type >= WM_T_82543) {
   5744 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5745 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5746 			wm_itrs_writereg(sc, wmq);
   5747 		}
   5748 		/*
    5749 		 * Link interrupts occur much less often than TX and
    5750 		 * RX interrupts, so we don't tune the
    5751 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5752 		 * FreeBSD's if_igb does.
   5753 		 */
   5754 	}
   5755 
   5756 	/* Set the VLAN ethernetype. */
   5757 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5758 
   5759 	/*
   5760 	 * Set up the transmit control register; we start out with
    5761 	 * a collision distance suitable for FDX, but update it when
   5762 	 * we resolve the media type.
   5763 	 */
   5764 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5765 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5766 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5767 	if (sc->sc_type >= WM_T_82571)
   5768 		sc->sc_tctl |= TCTL_MULR;
   5769 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5770 
   5771 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5772 		/* Write TDT after TCTL.EN is set. See the document. */
   5773 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5774 	}
   5775 
   5776 	if (sc->sc_type == WM_T_80003) {
   5777 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5778 		reg &= ~TCTL_EXT_GCEX_MASK;
   5779 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5780 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5781 	}
   5782 
   5783 	/* Set the media. */
   5784 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5785 		goto out;
   5786 
   5787 	/* Configure for OS presence */
   5788 	wm_init_manageability(sc);
   5789 
   5790 	/*
   5791 	 * Set up the receive control register; we actually program
   5792 	 * the register when we set the receive filter.  Use multicast
   5793 	 * address offset type 0.
   5794 	 *
   5795 	 * Only the i82544 has the ability to strip the incoming
   5796 	 * CRC, so we don't enable that feature.
   5797 	 */
   5798 	sc->sc_mchash_type = 0;
   5799 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5800 	    | RCTL_MO(sc->sc_mchash_type);
   5801 
   5802 	/*
    5803 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5804 	 */
   5805 	if (sc->sc_type == WM_T_82574)
   5806 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5807 
   5808 	/*
   5809 	 * The I350 has a bug where it always strips the CRC whether
    5810 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   5811 	 */
   5812 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5813 	    || (sc->sc_type == WM_T_I210))
   5814 		sc->sc_rctl |= RCTL_SECRC;
   5815 
   5816 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5817 	    && (ifp->if_mtu > ETHERMTU)) {
   5818 		sc->sc_rctl |= RCTL_LPE;
   5819 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5820 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5821 	}
   5822 
   5823 	if (MCLBYTES == 2048) {
   5824 		sc->sc_rctl |= RCTL_2k;
   5825 	} else {
   5826 		if (sc->sc_type >= WM_T_82543) {
   5827 			switch (MCLBYTES) {
   5828 			case 4096:
   5829 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5830 				break;
   5831 			case 8192:
   5832 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5833 				break;
   5834 			case 16384:
   5835 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5836 				break;
   5837 			default:
   5838 				panic("wm_init: MCLBYTES %d unsupported",
   5839 				    MCLBYTES);
   5840 				break;
   5841 			}
   5842 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5843 	}
   5844 
   5845 	/* Enable ECC */
   5846 	switch (sc->sc_type) {
   5847 	case WM_T_82571:
   5848 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5849 		reg |= PBA_ECC_CORR_EN;
   5850 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5851 		break;
   5852 	case WM_T_PCH_LPT:
   5853 	case WM_T_PCH_SPT:
   5854 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5855 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5856 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5857 
   5858 		sc->sc_ctrl |= CTRL_MEHE;
   5859 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5860 		break;
   5861 	default:
   5862 		break;
   5863 	}
   5864 
   5865 	/*
   5866 	 * Set the receive filter.
   5867 	 *
   5868 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5869 	 * the setting of RCTL.EN in wm_set_filter()
   5870 	 */
   5871 	wm_set_filter(sc);
   5872 
   5873 	/* On 575 and later set RDT only if RX enabled */
   5874 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5875 		int qidx;
   5876 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5877 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5878 			for (i = 0; i < WM_NRXDESC; i++) {
   5879 				mutex_enter(rxq->rxq_lock);
   5880 				wm_init_rxdesc(rxq, i);
   5881 				mutex_exit(rxq->rxq_lock);
    5883 			}
   5884 		}
   5885 	}
   5886 
   5887 	wm_unset_stopping_flags(sc);
   5888 
   5889 	/* Start the one second link check clock. */
   5890 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5891 
   5892 	/* ...all done! */
   5893 	ifp->if_flags |= IFF_RUNNING;
   5894 	ifp->if_flags &= ~IFF_OACTIVE;
   5895 
   5896  out:
   5897 	sc->sc_if_flags = ifp->if_flags;
   5898 	if (error)
   5899 		log(LOG_ERR, "%s: interface not running\n",
   5900 		    device_xname(sc->sc_dev));
   5901 	return error;
   5902 }
   5903 
   5904 /*
   5905  * wm_stop:		[ifnet interface function]
   5906  *
   5907  *	Stop transmission on the interface.
   5908  */
   5909 static void
   5910 wm_stop(struct ifnet *ifp, int disable)
   5911 {
   5912 	struct wm_softc *sc = ifp->if_softc;
   5913 
   5914 	WM_CORE_LOCK(sc);
   5915 	wm_stop_locked(ifp, disable);
   5916 	WM_CORE_UNLOCK(sc);
   5917 }
   5918 
   5919 static void
   5920 wm_stop_locked(struct ifnet *ifp, int disable)
   5921 {
   5922 	struct wm_softc *sc = ifp->if_softc;
   5923 	struct wm_txsoft *txs;
   5924 	int i, qidx;
   5925 
   5926 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5927 		device_xname(sc->sc_dev), __func__));
   5928 	KASSERT(WM_CORE_LOCKED(sc));
   5929 
   5930 	wm_set_stopping_flags(sc);
   5931 
   5932 	/* Stop the one second clock. */
   5933 	callout_stop(&sc->sc_tick_ch);
   5934 
   5935 	/* Stop the 82547 Tx FIFO stall check timer. */
   5936 	if (sc->sc_type == WM_T_82547)
   5937 		callout_stop(&sc->sc_txfifo_ch);
   5938 
   5939 	if (sc->sc_flags & WM_F_HAS_MII) {
   5940 		/* Down the MII. */
   5941 		mii_down(&sc->sc_mii);
   5942 	} else {
   5943 #if 0
   5944 		/* Should we clear PHY's status properly? */
   5945 		wm_reset(sc);
   5946 #endif
   5947 	}
   5948 
   5949 	/* Stop the transmit and receive processes. */
   5950 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5951 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5952 	sc->sc_rctl &= ~RCTL_EN;
   5953 
   5954 	/*
   5955 	 * Clear the interrupt mask to ensure the device cannot assert its
   5956 	 * interrupt line.
   5957 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5958 	 * service any currently pending or shared interrupt.
   5959 	 */
   5960 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5961 	sc->sc_icr = 0;
   5962 	if (wm_is_using_msix(sc)) {
   5963 		if (sc->sc_type != WM_T_82574) {
   5964 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5965 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5966 		} else
   5967 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5968 	}
   5969 
   5970 	/* Release any queued transmit buffers. */
   5971 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5972 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5973 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5974 		mutex_enter(txq->txq_lock);
   5975 		txq->txq_watchdog = false; /* ensure watchdog disabled */
   5976 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5977 			txs = &txq->txq_soft[i];
   5978 			if (txs->txs_mbuf != NULL) {
   5979 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5980 				m_freem(txs->txs_mbuf);
   5981 				txs->txs_mbuf = NULL;
   5982 			}
   5983 		}
   5984 		mutex_exit(txq->txq_lock);
   5985 	}
   5986 
   5987 	/* Mark the interface as down and cancel the watchdog timer. */
   5988 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5989 
   5990 	if (disable) {
   5991 		for (i = 0; i < sc->sc_nqueues; i++) {
   5992 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5993 			mutex_enter(rxq->rxq_lock);
   5994 			wm_rxdrain(rxq);
   5995 			mutex_exit(rxq->rxq_lock);
   5996 		}
   5997 	}
   5998 
   5999 #if 0 /* notyet */
   6000 	if (sc->sc_type >= WM_T_82544)
   6001 		CSR_WRITE(sc, WMREG_WUC, 0);
   6002 #endif
   6003 }
   6004 
   6005 static void
   6006 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6007 {
   6008 	struct mbuf *m;
   6009 	int i;
   6010 
   6011 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6012 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6013 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6014 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6015 		    m->m_data, m->m_len, m->m_flags);
   6016 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6017 	    i, i == 1 ? "" : "s");
   6018 }
   6019 
   6020 /*
   6021  * wm_82547_txfifo_stall:
   6022  *
   6023  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6024  *	reset the FIFO pointers, and restart packet transmission.
   6025  */
   6026 static void
   6027 wm_82547_txfifo_stall(void *arg)
   6028 {
   6029 	struct wm_softc *sc = arg;
   6030 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6031 
   6032 	mutex_enter(txq->txq_lock);
   6033 
   6034 	if (txq->txq_stopping)
   6035 		goto out;
   6036 
   6037 	if (txq->txq_fifo_stall) {
   6038 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6039 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6040 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6041 			/*
   6042 			 * Packets have drained.  Stop transmitter, reset
   6043 			 * FIFO pointers, restart transmitter, and kick
   6044 			 * the packet queue.
   6045 			 */
   6046 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6047 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6048 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6049 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6050 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6051 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6052 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6053 			CSR_WRITE_FLUSH(sc);
   6054 
   6055 			txq->txq_fifo_head = 0;
   6056 			txq->txq_fifo_stall = 0;
   6057 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6058 		} else {
   6059 			/*
   6060 			 * Still waiting for packets to drain; try again in
   6061 			 * another tick.
   6062 			 */
   6063 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6064 		}
   6065 	}
   6066 
   6067 out:
   6068 	mutex_exit(txq->txq_lock);
   6069 }
   6070 
   6071 /*
   6072  * wm_82547_txfifo_bugchk:
   6073  *
   6074  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6075  *	prevent enqueueing a packet that would wrap around the end
    6076  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6077  *
   6078  *	We do this by checking the amount of space before the end
   6079  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6080  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6081  *	the internal FIFO pointers to the beginning, and restart
   6082  *	transmission on the interface.
   6083  */
   6084 #define	WM_FIFO_HDR		0x10
   6085 #define	WM_82547_PAD_LEN	0x3e0
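
/*
 * Example: a full-sized 1514-byte frame consumes
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space
 * in the check below.
 */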
   6086 static int
   6087 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6088 {
   6089 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6090 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6091 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6092 
   6093 	/* Just return if already stalled. */
   6094 	if (txq->txq_fifo_stall)
   6095 		return 1;
   6096 
   6097 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6098 		/* Stall only occurs in half-duplex mode. */
   6099 		goto send_packet;
   6100 	}
   6101 
   6102 	if (len >= WM_82547_PAD_LEN + space) {
   6103 		txq->txq_fifo_stall = 1;
   6104 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6105 		return 1;
   6106 	}
   6107 
   6108  send_packet:
   6109 	txq->txq_fifo_head += len;
   6110 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6111 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6112 
   6113 	return 0;
   6114 }
   6115 
   6116 static int
   6117 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6118 {
   6119 	int error;
   6120 
   6121 	/*
   6122 	 * Allocate the control data structures, and create and load the
   6123 	 * DMA map for it.
   6124 	 *
   6125 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6126 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6127 	 * both sets within the same 4G segment.
   6128 	 */
   6129 	if (sc->sc_type < WM_T_82544)
   6130 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6131 	else
   6132 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6133 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6134 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6135 	else
   6136 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6137 
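	/*
	 * The 0x100000000ULL boundary argument below keeps the allocation
	 * from crossing a 4GB line, implementing the same-4G-segment rule
	 * noted above.
	 */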
   6138 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6139 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6140 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6141 		aprint_error_dev(sc->sc_dev,
   6142 		    "unable to allocate TX control data, error = %d\n",
   6143 		    error);
   6144 		goto fail_0;
   6145 	}
   6146 
   6147 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6148 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6149 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6150 		aprint_error_dev(sc->sc_dev,
   6151 		    "unable to map TX control data, error = %d\n", error);
   6152 		goto fail_1;
   6153 	}
   6154 
   6155 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6156 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6157 		aprint_error_dev(sc->sc_dev,
   6158 		    "unable to create TX control data DMA map, error = %d\n",
   6159 		    error);
   6160 		goto fail_2;
   6161 	}
   6162 
   6163 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6164 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6165 		aprint_error_dev(sc->sc_dev,
   6166 		    "unable to load TX control data DMA map, error = %d\n",
   6167 		    error);
   6168 		goto fail_3;
   6169 	}
   6170 
   6171 	return 0;
   6172 
   6173  fail_3:
   6174 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6175  fail_2:
   6176 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6177 	    WM_TXDESCS_SIZE(txq));
   6178  fail_1:
   6179 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6180  fail_0:
   6181 	return error;
   6182 }
   6183 
   6184 static void
   6185 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6186 {
   6187 
   6188 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6189 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6190 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6191 	    WM_TXDESCS_SIZE(txq));
   6192 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6193 }
   6194 
   6195 static int
   6196 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6197 {
   6198 	int error;
   6199 	size_t rxq_descs_size;
   6200 
   6201 	/*
   6202 	 * Allocate the control data structures, and create and load the
   6203 	 * DMA map for it.
   6204 	 *
   6205 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6206 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6207 	 * both sets within the same 4G segment.
   6208 	 */
   6209 	rxq->rxq_ndesc = WM_NRXDESC;
   6210 	if (sc->sc_type == WM_T_82574)
   6211 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6212 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6213 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6214 	else
   6215 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6216 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6217 
   6218 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6219 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6220 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6221 		aprint_error_dev(sc->sc_dev,
   6222 		    "unable to allocate RX control data, error = %d\n",
   6223 		    error);
   6224 		goto fail_0;
   6225 	}
   6226 
   6227 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6228 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6229 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6230 		aprint_error_dev(sc->sc_dev,
   6231 		    "unable to map RX control data, error = %d\n", error);
   6232 		goto fail_1;
   6233 	}
   6234 
   6235 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6236 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6237 		aprint_error_dev(sc->sc_dev,
   6238 		    "unable to create RX control data DMA map, error = %d\n",
   6239 		    error);
   6240 		goto fail_2;
   6241 	}
   6242 
   6243 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6244 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6245 		aprint_error_dev(sc->sc_dev,
   6246 		    "unable to load RX control data DMA map, error = %d\n",
   6247 		    error);
   6248 		goto fail_3;
   6249 	}
   6250 
   6251 	return 0;
   6252 
   6253  fail_3:
   6254 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6255  fail_2:
   6256 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6257 	    rxq_descs_size);
   6258  fail_1:
   6259 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6260  fail_0:
   6261 	return error;
   6262 }
   6263 
   6264 static void
   6265 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6266 {
   6267 
   6268 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6269 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6270 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6271 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6272 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6273 }
   6274 
   6275 
   6276 static int
   6277 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6278 {
   6279 	int i, error;
   6280 
   6281 	/* Create the transmit buffer DMA maps. */
   6282 	WM_TXQUEUELEN(txq) =
   6283 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6284 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6285 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6286 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6287 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6288 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6289 			aprint_error_dev(sc->sc_dev,
   6290 			    "unable to create Tx DMA map %d, error = %d\n",
   6291 			    i, error);
   6292 			goto fail;
   6293 		}
   6294 	}
   6295 
   6296 	return 0;
   6297 
   6298  fail:
   6299 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6300 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6301 			bus_dmamap_destroy(sc->sc_dmat,
   6302 			    txq->txq_soft[i].txs_dmamap);
   6303 	}
   6304 	return error;
   6305 }
   6306 
   6307 static void
   6308 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6309 {
   6310 	int i;
   6311 
   6312 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6313 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6314 			bus_dmamap_destroy(sc->sc_dmat,
   6315 			    txq->txq_soft[i].txs_dmamap);
   6316 	}
   6317 }
   6318 
   6319 static int
   6320 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6321 {
   6322 	int i, error;
   6323 
   6324 	/* Create the receive buffer DMA maps. */
   6325 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6326 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6327 			    MCLBYTES, 0, 0,
   6328 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6329 			aprint_error_dev(sc->sc_dev,
    6330 			    "unable to create Rx DMA map %d, error = %d\n",
   6331 			    i, error);
   6332 			goto fail;
   6333 		}
   6334 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6335 	}
   6336 
   6337 	return 0;
   6338 
   6339  fail:
   6340 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6341 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6342 			bus_dmamap_destroy(sc->sc_dmat,
   6343 			    rxq->rxq_soft[i].rxs_dmamap);
   6344 	}
   6345 	return error;
   6346 }
   6347 
   6348 static void
   6349 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6350 {
   6351 	int i;
   6352 
   6353 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6354 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6355 			bus_dmamap_destroy(sc->sc_dmat,
   6356 			    rxq->rxq_soft[i].rxs_dmamap);
   6357 	}
   6358 }
   6359 
   6360 /*
    6361  * wm_alloc_txrx_queues:
    6362  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6363  */
   6364 static int
   6365 wm_alloc_txrx_queues(struct wm_softc *sc)
   6366 {
   6367 	int i, error, tx_done, rx_done;
   6368 
   6369 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6370 	    KM_SLEEP);
   6371 	if (sc->sc_queue == NULL) {
    6372 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6373 		error = ENOMEM;
   6374 		goto fail_0;
   6375 	}
   6376 
   6377 	/*
   6378 	 * For transmission
   6379 	 */
   6380 	error = 0;
   6381 	tx_done = 0;
   6382 	for (i = 0; i < sc->sc_nqueues; i++) {
   6383 #ifdef WM_EVENT_COUNTERS
   6384 		int j;
   6385 		const char *xname;
   6386 #endif
   6387 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6388 		txq->txq_sc = sc;
   6389 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6390 
   6391 		error = wm_alloc_tx_descs(sc, txq);
   6392 		if (error)
   6393 			break;
   6394 		error = wm_alloc_tx_buffer(sc, txq);
   6395 		if (error) {
   6396 			wm_free_tx_descs(sc, txq);
   6397 			break;
   6398 		}
   6399 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6400 		if (txq->txq_interq == NULL) {
   6401 			wm_free_tx_descs(sc, txq);
   6402 			wm_free_tx_buffer(sc, txq);
   6403 			error = ENOMEM;
   6404 			break;
   6405 		}
   6406 
   6407 #ifdef WM_EVENT_COUNTERS
   6408 		xname = device_xname(sc->sc_dev);
   6409 
   6410 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6411 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6412 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6413 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6414 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6415 
   6416 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6417 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6418 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6419 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6420 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6421 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6422 
   6423 		for (j = 0; j < WM_NTXSEGS; j++) {
   6424 			snprintf(txq->txq_txseg_evcnt_names[j],
   6425 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6426 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6427 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6428 		}
   6429 
   6430 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6431 
   6432 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6433 #endif /* WM_EVENT_COUNTERS */
   6434 
   6435 		tx_done++;
   6436 	}
   6437 	if (error)
   6438 		goto fail_1;
   6439 
   6440 	/*
    6441 	 * For receive
   6442 	 */
   6443 	error = 0;
   6444 	rx_done = 0;
   6445 	for (i = 0; i < sc->sc_nqueues; i++) {
   6446 #ifdef WM_EVENT_COUNTERS
   6447 		const char *xname;
   6448 #endif
   6449 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6450 		rxq->rxq_sc = sc;
   6451 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6452 
   6453 		error = wm_alloc_rx_descs(sc, rxq);
   6454 		if (error)
   6455 			break;
   6456 
   6457 		error = wm_alloc_rx_buffer(sc, rxq);
   6458 		if (error) {
   6459 			wm_free_rx_descs(sc, rxq);
   6460 			break;
   6461 		}
   6462 
   6463 #ifdef WM_EVENT_COUNTERS
   6464 		xname = device_xname(sc->sc_dev);
   6465 
   6466 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6467 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxdefer, rxq, i, xname);
   6468 
   6469 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6470 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6471 #endif /* WM_EVENT_COUNTERS */
   6472 
   6473 		rx_done++;
   6474 	}
   6475 	if (error)
   6476 		goto fail_2;
   6477 
   6478 	return 0;
   6479 
   6480  fail_2:
   6481 	for (i = 0; i < rx_done; i++) {
   6482 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6483 		wm_free_rx_buffer(sc, rxq);
   6484 		wm_free_rx_descs(sc, rxq);
   6485 		if (rxq->rxq_lock)
   6486 			mutex_obj_free(rxq->rxq_lock);
   6487 	}
   6488  fail_1:
   6489 	for (i = 0; i < tx_done; i++) {
   6490 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6491 		pcq_destroy(txq->txq_interq);
   6492 		wm_free_tx_buffer(sc, txq);
   6493 		wm_free_tx_descs(sc, txq);
   6494 		if (txq->txq_lock)
   6495 			mutex_obj_free(txq->txq_lock);
   6496 	}
   6497 
   6498 	kmem_free(sc->sc_queue,
   6499 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6500  fail_0:
   6501 	return error;
   6502 }
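
         /*
          * Note the partial-failure unwind above: tx_done and rx_done count
          * only the queues that were fully set up, so the labeled error
          * paths free exactly those.  For example (hypothetical numbers),
          * with sc_nqueues == 4 and a failure while setting up rxq 2,
          * tx_done == 4 and rx_done == 2; fail_2 then unwinds rxqs 0-1,
          * fail_1 unwinds txqs 0-3, and finally the queue array itself is
          * freed.
          */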
   6503 
   6504 /*
    6505  * wm_free_txrx_queues:
    6506  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6507  */
   6508 static void
   6509 wm_free_txrx_queues(struct wm_softc *sc)
   6510 {
   6511 	int i;
   6512 
   6513 	for (i = 0; i < sc->sc_nqueues; i++) {
   6514 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6515 
   6516 #ifdef WM_EVENT_COUNTERS
   6517 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6518 		WM_Q_EVCNT_DETACH(rxq, rxdefer, rxq, i);
   6519 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6520 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6521 #endif /* WM_EVENT_COUNTERS */
   6522 
   6523 		wm_free_rx_buffer(sc, rxq);
   6524 		wm_free_rx_descs(sc, rxq);
   6525 		if (rxq->rxq_lock)
   6526 			mutex_obj_free(rxq->rxq_lock);
   6527 	}
   6528 
   6529 	for (i = 0; i < sc->sc_nqueues; i++) {
   6530 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6531 		struct mbuf *m;
   6532 #ifdef WM_EVENT_COUNTERS
   6533 		int j;
   6534 
   6535 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6536 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6537 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6538 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6539 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6540 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6541 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6542 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6543 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6544 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6545 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6546 
   6547 		for (j = 0; j < WM_NTXSEGS; j++)
   6548 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6549 
   6550 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6551 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6552 #endif /* WM_EVENT_COUNTERS */
   6553 
   6554 		/* drain txq_interq */
   6555 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6556 			m_freem(m);
   6557 		pcq_destroy(txq->txq_interq);
   6558 
   6559 		wm_free_tx_buffer(sc, txq);
   6560 		wm_free_tx_descs(sc, txq);
   6561 		if (txq->txq_lock)
   6562 			mutex_obj_free(txq->txq_lock);
   6563 	}
   6564 
   6565 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6566 }
   6567 
   6568 static void
   6569 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6570 {
   6571 
   6572 	KASSERT(mutex_owned(txq->txq_lock));
   6573 
   6574 	/* Initialize the transmit descriptor ring. */
   6575 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6576 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6577 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6578 	txq->txq_free = WM_NTXDESC(txq);
   6579 	txq->txq_next = 0;
   6580 }
   6581 
   6582 static void
   6583 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6584     struct wm_txqueue *txq)
   6585 {
   6586 
   6587 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6588 		device_xname(sc->sc_dev), __func__));
   6589 	KASSERT(mutex_owned(txq->txq_lock));
   6590 
   6591 	if (sc->sc_type < WM_T_82543) {
   6592 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6593 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6594 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6595 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6596 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6597 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6598 	} else {
   6599 		int qid = wmq->wmq_id;
   6600 
   6601 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6602 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6603 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6604 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6605 
   6606 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6607 			/*
   6608 			 * Don't write TDT before TCTL.EN is set.
    6609 			 * See the datasheet.
   6610 			 */
   6611 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6612 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6613 			    | TXDCTL_WTHRESH(0));
   6614 		else {
   6615 			/* XXX should update with AIM? */
   6616 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6617 			if (sc->sc_type >= WM_T_82540) {
    6618 				/* Should be the same value as TIDV. */
   6619 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6620 			}
   6621 
   6622 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6623 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6624 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6625 		}
   6626 	}
   6627 }
   6628 
   6629 static void
   6630 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6631 {
   6632 	int i;
   6633 
   6634 	KASSERT(mutex_owned(txq->txq_lock));
   6635 
   6636 	/* Initialize the transmit job descriptors. */
   6637 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6638 		txq->txq_soft[i].txs_mbuf = NULL;
   6639 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6640 	txq->txq_snext = 0;
   6641 	txq->txq_sdirty = 0;
   6642 }
   6643 
   6644 static void
   6645 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6646     struct wm_txqueue *txq)
   6647 {
   6648 
   6649 	KASSERT(mutex_owned(txq->txq_lock));
   6650 
   6651 	/*
   6652 	 * Set up some register offsets that are different between
   6653 	 * the i82542 and the i82543 and later chips.
   6654 	 */
   6655 	if (sc->sc_type < WM_T_82543)
   6656 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6657 	else
   6658 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6659 
   6660 	wm_init_tx_descs(sc, txq);
   6661 	wm_init_tx_regs(sc, wmq, txq);
   6662 	wm_init_tx_buffer(sc, txq);
   6663 
   6664 	txq->txq_watchdog = false;
   6665 }
   6666 
   6667 static void
   6668 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6669     struct wm_rxqueue *rxq)
   6670 {
   6671 
   6672 	KASSERT(mutex_owned(rxq->rxq_lock));
   6673 
   6674 	/*
   6675 	 * Initialize the receive descriptor and receive job
   6676 	 * descriptor rings.
   6677 	 */
   6678 	if (sc->sc_type < WM_T_82543) {
   6679 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6680 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6681 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6682 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6683 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6684 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6685 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6686 
   6687 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6688 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6689 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6690 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6691 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6692 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6693 	} else {
   6694 		int qid = wmq->wmq_id;
   6695 
   6696 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6697 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6698 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6699 
   6700 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6701 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6702 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6703 
    6704 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6705 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6706 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6707 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6708 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6709 			    | RXDCTL_WTHRESH(1));
   6710 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6711 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6712 		} else {
   6713 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6714 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6715 			/* XXX should update with AIM? */
   6716 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6717 			/* MUST be the same value as RDTR. */
   6718 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6719 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6720 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6721 		}
   6722 	}
   6723 }
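
         /*
          * A worked example for the SRRCTL setup above, assuming the usual
          * 1 KB granularity of the BSIZEPKT field (SRRCTL_BSIZEPKT_SHIFT
          * == 10) and the common MCLBYTES == 2048:
          *
          *	MCLBYTES & ((1 << 10) - 1) == 0	-> no panic
          *	MCLBYTES >> 10 == 2		-> 2 KB receive buffers
          *
          * A cluster size that is not a multiple of the BSIZEPKT unit
          * cannot be expressed in that field, which is why the panic above
          * fires for such configurations.
          */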
   6724 
   6725 static int
   6726 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6727 {
   6728 	struct wm_rxsoft *rxs;
   6729 	int error, i;
   6730 
   6731 	KASSERT(mutex_owned(rxq->rxq_lock));
   6732 
   6733 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6734 		rxs = &rxq->rxq_soft[i];
   6735 		if (rxs->rxs_mbuf == NULL) {
   6736 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6737 				log(LOG_ERR, "%s: unable to allocate or map "
   6738 				    "rx buffer %d, error = %d\n",
   6739 				    device_xname(sc->sc_dev), i, error);
   6740 				/*
   6741 				 * XXX Should attempt to run with fewer receive
   6742 				 * XXX buffers instead of just failing.
   6743 				 */
   6744 				wm_rxdrain(rxq);
   6745 				return ENOMEM;
   6746 			}
   6747 		} else {
   6748 			/*
   6749 			 * For 82575 and 82576, the RX descriptors must be
   6750 			 * initialized after the setting of RCTL.EN in
   6751 			 * wm_set_filter()
   6752 			 */
   6753 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6754 				wm_init_rxdesc(rxq, i);
   6755 		}
   6756 	}
   6757 	rxq->rxq_ptr = 0;
   6758 	rxq->rxq_discard = 0;
   6759 	WM_RXCHAIN_RESET(rxq);
   6760 
   6761 	return 0;
   6762 }
   6763 
   6764 static int
   6765 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6766     struct wm_rxqueue *rxq)
   6767 {
   6768 
   6769 	KASSERT(mutex_owned(rxq->rxq_lock));
   6770 
   6771 	/*
   6772 	 * Set up some register offsets that are different between
   6773 	 * the i82542 and the i82543 and later chips.
   6774 	 */
   6775 	if (sc->sc_type < WM_T_82543)
   6776 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6777 	else
   6778 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6779 
   6780 	wm_init_rx_regs(sc, wmq, rxq);
   6781 	return wm_init_rx_buffer(sc, rxq);
   6782 }
   6783 
   6784 /*
    6785  * wm_init_txrx_queues:
    6786  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6787  */
   6788 static int
   6789 wm_init_txrx_queues(struct wm_softc *sc)
   6790 {
   6791 	int i, error = 0;
   6792 
   6793 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6794 		device_xname(sc->sc_dev), __func__));
   6795 
   6796 	for (i = 0; i < sc->sc_nqueues; i++) {
   6797 		struct wm_queue *wmq = &sc->sc_queue[i];
   6798 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6799 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6800 
   6801 		/*
   6802 		 * TODO
    6803 		 * Currently, a constant value is used instead of AIM.
    6804 		 * Furthermore, the interrupt interval for multiqueue (which
    6805 		 * uses polling mode) is less than the default value.
    6806 		 * More tuning and AIM support are required.
   6807 		 */
   6808 		if (wm_is_using_multiqueue(sc))
   6809 			wmq->wmq_itr = 50;
   6810 		else
   6811 			wmq->wmq_itr = sc->sc_itr_init;
   6812 		wmq->wmq_set_itr = true;
   6813 
   6814 		mutex_enter(txq->txq_lock);
   6815 		wm_init_tx_queue(sc, wmq, txq);
   6816 		mutex_exit(txq->txq_lock);
   6817 
   6818 		mutex_enter(rxq->rxq_lock);
   6819 		error = wm_init_rx_queue(sc, wmq, rxq);
   6820 		mutex_exit(rxq->rxq_lock);
   6821 		if (error)
   6822 			break;
   6823 	}
   6824 
   6825 	return error;
   6826 }
   6827 
   6828 /*
   6829  * wm_tx_offload:
   6830  *
   6831  *	Set up TCP/IP checksumming parameters for the
   6832  *	specified packet.
   6833  */
   6834 static int
   6835 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6836     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6837 {
   6838 	struct mbuf *m0 = txs->txs_mbuf;
   6839 	struct livengood_tcpip_ctxdesc *t;
   6840 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6841 	uint32_t ipcse;
   6842 	struct ether_header *eh;
   6843 	int offset, iphl;
   6844 	uint8_t fields;
   6845 
   6846 	/*
   6847 	 * XXX It would be nice if the mbuf pkthdr had offset
   6848 	 * fields for the protocol headers.
   6849 	 */
   6850 
   6851 	eh = mtod(m0, struct ether_header *);
   6852 	switch (htons(eh->ether_type)) {
   6853 	case ETHERTYPE_IP:
   6854 	case ETHERTYPE_IPV6:
   6855 		offset = ETHER_HDR_LEN;
   6856 		break;
   6857 
   6858 	case ETHERTYPE_VLAN:
   6859 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6860 		break;
   6861 
   6862 	default:
   6863 		/*
   6864 		 * Don't support this protocol or encapsulation.
   6865 		 */
   6866 		*fieldsp = 0;
   6867 		*cmdp = 0;
   6868 		return 0;
   6869 	}
   6870 
   6871 	if ((m0->m_pkthdr.csum_flags &
   6872 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6873 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6874 	} else {
   6875 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6876 	}
   6877 	ipcse = offset + iphl - 1;
   6878 
   6879 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6880 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6881 	seg = 0;
   6882 	fields = 0;
   6883 
   6884 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6885 		int hlen = offset + iphl;
   6886 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6887 
   6888 		if (__predict_false(m0->m_len <
   6889 				    (hlen + sizeof(struct tcphdr)))) {
   6890 			/*
   6891 			 * TCP/IP headers are not in the first mbuf; we need
   6892 			 * to do this the slow and painful way.  Let's just
   6893 			 * hope this doesn't happen very often.
   6894 			 */
   6895 			struct tcphdr th;
   6896 
   6897 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6898 
   6899 			m_copydata(m0, hlen, sizeof(th), &th);
   6900 			if (v4) {
   6901 				struct ip ip;
   6902 
   6903 				m_copydata(m0, offset, sizeof(ip), &ip);
   6904 				ip.ip_len = 0;
   6905 				m_copyback(m0,
   6906 				    offset + offsetof(struct ip, ip_len),
   6907 				    sizeof(ip.ip_len), &ip.ip_len);
   6908 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6909 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6910 			} else {
   6911 				struct ip6_hdr ip6;
   6912 
   6913 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6914 				ip6.ip6_plen = 0;
   6915 				m_copyback(m0,
   6916 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6917 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6918 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6919 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6920 			}
   6921 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6922 			    sizeof(th.th_sum), &th.th_sum);
   6923 
   6924 			hlen += th.th_off << 2;
   6925 		} else {
   6926 			/*
   6927 			 * TCP/IP headers are in the first mbuf; we can do
   6928 			 * this the easy way.
   6929 			 */
   6930 			struct tcphdr *th;
   6931 
   6932 			if (v4) {
   6933 				struct ip *ip =
   6934 				    (void *)(mtod(m0, char *) + offset);
   6935 				th = (void *)(mtod(m0, char *) + hlen);
   6936 
   6937 				ip->ip_len = 0;
   6938 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6939 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6940 			} else {
   6941 				struct ip6_hdr *ip6 =
   6942 				    (void *)(mtod(m0, char *) + offset);
   6943 				th = (void *)(mtod(m0, char *) + hlen);
   6944 
   6945 				ip6->ip6_plen = 0;
   6946 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6947 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6948 			}
   6949 			hlen += th->th_off << 2;
   6950 		}
   6951 
   6952 		if (v4) {
   6953 			WM_Q_EVCNT_INCR(txq, txtso);
   6954 			cmdlen |= WTX_TCPIP_CMD_IP;
   6955 		} else {
   6956 			WM_Q_EVCNT_INCR(txq, txtso6);
   6957 			ipcse = 0;
   6958 		}
   6959 		cmd |= WTX_TCPIP_CMD_TSE;
   6960 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6961 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6962 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6963 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6964 	}
   6965 
   6966 	/*
   6967 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6968 	 * offload feature, if we load the context descriptor, we
   6969 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6970 	 */
   6971 
   6972 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6973 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6974 	    WTX_TCPIP_IPCSE(ipcse);
   6975 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6976 		WM_Q_EVCNT_INCR(txq, txipsum);
   6977 		fields |= WTX_IXSM;
   6978 	}
   6979 
   6980 	offset += iphl;
   6981 
   6982 	if (m0->m_pkthdr.csum_flags &
   6983 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6984 		WM_Q_EVCNT_INCR(txq, txtusum);
   6985 		fields |= WTX_TXSM;
   6986 		tucs = WTX_TCPIP_TUCSS(offset) |
   6987 		    WTX_TCPIP_TUCSO(offset +
   6988 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6989 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6990 	} else if ((m0->m_pkthdr.csum_flags &
   6991 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6992 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6993 		fields |= WTX_TXSM;
   6994 		tucs = WTX_TCPIP_TUCSS(offset) |
   6995 		    WTX_TCPIP_TUCSO(offset +
   6996 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6997 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6998 	} else {
   6999 		/* Just initialize it to a valid TCP context. */
   7000 		tucs = WTX_TCPIP_TUCSS(offset) |
   7001 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7002 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7003 	}
   7004 
   7005 	/*
    7006 	 * We don't have to write a context descriptor for every packet,
    7007 	 * except on the 82574, which requires one for every packet when
    7008 	 * two descriptor queues are used.
    7009 	 * Writing a context descriptor for every packet adds some
    7010 	 * overhead, but it does not cause problems.
   7011 	 */
   7012 	/* Fill in the context descriptor. */
   7013 	t = (struct livengood_tcpip_ctxdesc *)
   7014 	    &txq->txq_descs[txq->txq_next];
   7015 	t->tcpip_ipcs = htole32(ipcs);
   7016 	t->tcpip_tucs = htole32(tucs);
   7017 	t->tcpip_cmdlen = htole32(cmdlen);
   7018 	t->tcpip_seg = htole32(seg);
   7019 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7020 
   7021 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7022 	txs->txs_ndesc++;
   7023 
   7024 	*cmdp = cmd;
   7025 	*fieldsp = fields;
   7026 
   7027 	return 0;
   7028 }
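
         /*
          * A worked example of the checksum context computed above, for a
          * plain IPv4/TCP packet without VLAN encapsulation
          * (offset == ETHER_HDR_LEN == 14, iphl == 20):
          *
          *	IPCSS = 14			start of the IP header
          *	IPCSO = 14 + 10 = 24		offsetof(struct ip, ip_sum)
          *	IPCSE = 14 + 20 - 1 = 33	last byte to checksum
          *
          * TUCSS/TUCSO/TUCSE describe the TCP/UDP checksum the same way,
          * with TUCSE == 0 meaning "to the end of the packet".
          */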
   7029 
   7030 static inline int
   7031 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7032 {
   7033 	struct wm_softc *sc = ifp->if_softc;
   7034 	u_int cpuid = cpu_index(curcpu());
   7035 
   7036 	/*
    7037 	 * Currently, a simple distribution strategy.
    7038 	 * TODO:
    7039 	 * distribute by flowid (RSS hash value).
   7040 	 */
    7041 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7042 }
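
         /*
          * A worked example of the mapping above, with hypothetical numbers:
          * for ncpu == 8, sc_nqueues == 4 and sc_affinity_offset == 2,
          * cpuid 2 maps to queue (2 + 8 - 2) % 4 == 0, cpuid 3 to queue 1,
          * and so on.  Adding ncpu before the subtraction avoids unsigned
          * wraparound when cpuid is below the affinity offset.
          */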
   7043 
   7044 /*
   7045  * wm_start:		[ifnet interface function]
   7046  *
   7047  *	Start packet transmission on the interface.
   7048  */
   7049 static void
   7050 wm_start(struct ifnet *ifp)
   7051 {
   7052 	struct wm_softc *sc = ifp->if_softc;
   7053 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7054 
   7055 #ifdef WM_MPSAFE
   7056 	KASSERT(if_is_mpsafe(ifp));
   7057 #endif
   7058 	/*
   7059 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7060 	 */
   7061 
   7062 	mutex_enter(txq->txq_lock);
   7063 	if (!txq->txq_stopping)
   7064 		wm_start_locked(ifp);
   7065 	mutex_exit(txq->txq_lock);
   7066 }
   7067 
   7068 static void
   7069 wm_start_locked(struct ifnet *ifp)
   7070 {
   7071 	struct wm_softc *sc = ifp->if_softc;
   7072 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7073 
   7074 	wm_send_common_locked(ifp, txq, false);
   7075 }
   7076 
   7077 static int
   7078 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7079 {
   7080 	int qid;
   7081 	struct wm_softc *sc = ifp->if_softc;
   7082 	struct wm_txqueue *txq;
   7083 
   7084 	qid = wm_select_txqueue(ifp, m);
   7085 	txq = &sc->sc_queue[qid].wmq_txq;
   7086 
   7087 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7088 		m_freem(m);
   7089 		WM_Q_EVCNT_INCR(txq, txdrop);
   7090 		return ENOBUFS;
   7091 	}
   7092 
   7093 	/*
   7094 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7095 	 */
   7096 	ifp->if_obytes += m->m_pkthdr.len;
   7097 	if (m->m_flags & M_MCAST)
   7098 		ifp->if_omcasts++;
   7099 
   7100 	if (mutex_tryenter(txq->txq_lock)) {
   7101 		if (!txq->txq_stopping)
   7102 			wm_transmit_locked(ifp, txq);
   7103 		mutex_exit(txq->txq_lock);
   7104 	}
   7105 
   7106 	return 0;
   7107 }
   7108 
   7109 static void
   7110 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7111 {
   7112 
   7113 	wm_send_common_locked(ifp, txq, true);
   7114 }
   7115 
   7116 static void
   7117 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7118     bool is_transmit)
   7119 {
   7120 	struct wm_softc *sc = ifp->if_softc;
   7121 	struct mbuf *m0;
   7122 	struct wm_txsoft *txs;
   7123 	bus_dmamap_t dmamap;
   7124 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7125 	bus_addr_t curaddr;
   7126 	bus_size_t seglen, curlen;
   7127 	uint32_t cksumcmd;
   7128 	uint8_t cksumfields;
   7129 
   7130 	KASSERT(mutex_owned(txq->txq_lock));
   7131 
   7132 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7133 		return;
   7134 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7135 		return;
   7136 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7137 		return;
   7138 
   7139 	/* Remember the previous number of free descriptors. */
   7140 	ofree = txq->txq_free;
   7141 
   7142 	/*
   7143 	 * Loop through the send queue, setting up transmit descriptors
   7144 	 * until we drain the queue, or use up all available transmit
   7145 	 * descriptors.
   7146 	 */
   7147 	for (;;) {
   7148 		m0 = NULL;
   7149 
   7150 		/* Get a work queue entry. */
   7151 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7152 			wm_txeof(txq, UINT_MAX);
   7153 			if (txq->txq_sfree == 0) {
   7154 				DPRINTF(WM_DEBUG_TX,
   7155 				    ("%s: TX: no free job descriptors\n",
   7156 					device_xname(sc->sc_dev)));
   7157 				WM_Q_EVCNT_INCR(txq, txsstall);
   7158 				break;
   7159 			}
   7160 		}
   7161 
   7162 		/* Grab a packet off the queue. */
   7163 		if (is_transmit)
   7164 			m0 = pcq_get(txq->txq_interq);
   7165 		else
   7166 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7167 		if (m0 == NULL)
   7168 			break;
   7169 
   7170 		DPRINTF(WM_DEBUG_TX,
   7171 		    ("%s: TX: have packet to transmit: %p\n",
   7172 		    device_xname(sc->sc_dev), m0));
   7173 
   7174 		txs = &txq->txq_soft[txq->txq_snext];
   7175 		dmamap = txs->txs_dmamap;
   7176 
   7177 		use_tso = (m0->m_pkthdr.csum_flags &
   7178 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7179 
   7180 		/*
   7181 		 * So says the Linux driver:
   7182 		 * The controller does a simple calculation to make sure
   7183 		 * there is enough room in the FIFO before initiating the
   7184 		 * DMA for each buffer.  The calc is:
   7185 		 *	4 = ceil(buffer len / MSS)
   7186 		 * To make sure we don't overrun the FIFO, adjust the max
   7187 		 * buffer len if the MSS drops.
   7188 		 */
   7189 		dmamap->dm_maxsegsz =
   7190 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7191 		    ? m0->m_pkthdr.segsz << 2
   7192 		    : WTX_MAX_LEN;
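
         		/*
         		 * In other words, for TSO packets the per-segment cap
         		 * becomes min(MSS << 2, WTX_MAX_LEN); e.g. an MSS of
         		 * 1448 yields min(5792, WTX_MAX_LEN) bytes per DMA
         		 * segment.
         		 */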
   7193 
   7194 		/*
   7195 		 * Load the DMA map.  If this fails, the packet either
   7196 		 * didn't fit in the allotted number of segments, or we
   7197 		 * were short on resources.  For the too-many-segments
   7198 		 * case, we simply report an error and drop the packet,
   7199 		 * since we can't sanely copy a jumbo packet to a single
   7200 		 * buffer.
   7201 		 */
   7202 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7203 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7204 		if (error) {
   7205 			if (error == EFBIG) {
   7206 				WM_Q_EVCNT_INCR(txq, txdrop);
   7207 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7208 				    "DMA segments, dropping...\n",
   7209 				    device_xname(sc->sc_dev));
   7210 				wm_dump_mbuf_chain(sc, m0);
   7211 				m_freem(m0);
   7212 				continue;
   7213 			}
    7214 			/* Short on resources, just stop for now. */
   7215 			DPRINTF(WM_DEBUG_TX,
   7216 			    ("%s: TX: dmamap load failed: %d\n",
   7217 			    device_xname(sc->sc_dev), error));
   7218 			break;
   7219 		}
   7220 
   7221 		segs_needed = dmamap->dm_nsegs;
   7222 		if (use_tso) {
   7223 			/* For sentinel descriptor; see below. */
   7224 			segs_needed++;
   7225 		}
   7226 
   7227 		/*
   7228 		 * Ensure we have enough descriptors free to describe
   7229 		 * the packet.  Note, we always reserve one descriptor
   7230 		 * at the end of the ring due to the semantics of the
   7231 		 * TDT register, plus one more in the event we need
   7232 		 * to load offload context.
   7233 		 */
   7234 		if (segs_needed > txq->txq_free - 2) {
   7235 			/*
   7236 			 * Not enough free descriptors to transmit this
   7237 			 * packet.  We haven't committed anything yet,
   7238 			 * so just unload the DMA map, put the packet
    7239 			 * back on the queue, and punt.  Notify the upper
   7240 			 * layer that there are no more slots left.
   7241 			 */
   7242 			DPRINTF(WM_DEBUG_TX,
   7243 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7244 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7245 			    segs_needed, txq->txq_free - 1));
   7246 			if (!is_transmit)
   7247 				ifp->if_flags |= IFF_OACTIVE;
   7248 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7249 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7250 			WM_Q_EVCNT_INCR(txq, txdstall);
   7251 			break;
   7252 		}
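
         		/*
         		 * A worked example of the check above: with
         		 * txq_free == 5, one descriptor is reserved for the
         		 * TDT semantics and one more for a possible offload
         		 * context, so a packet needing more than three
         		 * descriptors (segs_needed > 5 - 2) must wait.
         		 */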
   7253 
   7254 		/*
   7255 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7256 		 * once we know we can transmit the packet, since we
   7257 		 * do some internal FIFO space accounting here.
   7258 		 */
   7259 		if (sc->sc_type == WM_T_82547 &&
   7260 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7261 			DPRINTF(WM_DEBUG_TX,
   7262 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7263 			    device_xname(sc->sc_dev)));
   7264 			if (!is_transmit)
   7265 				ifp->if_flags |= IFF_OACTIVE;
   7266 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7267 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7268 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7269 			break;
   7270 		}
   7271 
   7272 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7273 
   7274 		DPRINTF(WM_DEBUG_TX,
   7275 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7276 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7277 
   7278 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7279 
   7280 		/*
   7281 		 * Store a pointer to the packet so that we can free it
   7282 		 * later.
   7283 		 *
   7284 		 * Initially, we consider the number of descriptors the
   7285 		 * packet uses the number of DMA segments.  This may be
   7286 		 * incremented by 1 if we do checksum offload (a descriptor
   7287 		 * is used to set the checksum context).
   7288 		 */
   7289 		txs->txs_mbuf = m0;
   7290 		txs->txs_firstdesc = txq->txq_next;
   7291 		txs->txs_ndesc = segs_needed;
   7292 
   7293 		/* Set up offload parameters for this packet. */
   7294 		if (m0->m_pkthdr.csum_flags &
   7295 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7296 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7297 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7298 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7299 					  &cksumfields) != 0) {
   7300 				/* Error message already displayed. */
   7301 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7302 				continue;
   7303 			}
   7304 		} else {
   7305 			cksumcmd = 0;
   7306 			cksumfields = 0;
   7307 		}
   7308 
   7309 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7310 
   7311 		/* Sync the DMA map. */
   7312 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7313 		    BUS_DMASYNC_PREWRITE);
   7314 
   7315 		/* Initialize the transmit descriptor. */
   7316 		for (nexttx = txq->txq_next, seg = 0;
   7317 		     seg < dmamap->dm_nsegs; seg++) {
   7318 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7319 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7320 			     seglen != 0;
   7321 			     curaddr += curlen, seglen -= curlen,
   7322 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7323 				curlen = seglen;
   7324 
   7325 				/*
   7326 				 * So says the Linux driver:
   7327 				 * Work around for premature descriptor
   7328 				 * write-backs in TSO mode.  Append a
   7329 				 * 4-byte sentinel descriptor.
   7330 				 */
   7331 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7332 				    curlen > 8)
   7333 					curlen -= 4;
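         				/*
         				 * E.g. a 1000-byte final segment is
         				 * emitted as a 996-byte descriptor,
         				 * and the loop comes around once more
         				 * to emit the remaining 4 bytes as the
         				 * sentinel descriptor counted in
         				 * segs_needed above.
         				 */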
   7334 
   7335 				wm_set_dma_addr(
   7336 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7337 				txq->txq_descs[nexttx].wtx_cmdlen
   7338 				    = htole32(cksumcmd | curlen);
   7339 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7340 				    = 0;
   7341 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7342 				    = cksumfields;
    7343 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7344 				lasttx = nexttx;
   7345 
   7346 				DPRINTF(WM_DEBUG_TX,
   7347 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7348 				     "len %#04zx\n",
   7349 				    device_xname(sc->sc_dev), nexttx,
   7350 				    (uint64_t)curaddr, curlen));
   7351 			}
   7352 		}
   7353 
   7354 		KASSERT(lasttx != -1);
   7355 
   7356 		/*
   7357 		 * Set up the command byte on the last descriptor of
   7358 		 * the packet.  If we're in the interrupt delay window,
   7359 		 * delay the interrupt.
   7360 		 */
   7361 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7362 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7363 
   7364 		/*
   7365 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7366 		 * up the descriptor to encapsulate the packet for us.
   7367 		 *
   7368 		 * This is only valid on the last descriptor of the packet.
   7369 		 */
   7370 		if (vlan_has_tag(m0)) {
   7371 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7372 			    htole32(WTX_CMD_VLE);
   7373 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7374 			    = htole16(vlan_get_tag(m0));
   7375 		}
   7376 
   7377 		txs->txs_lastdesc = lasttx;
   7378 
   7379 		DPRINTF(WM_DEBUG_TX,
   7380 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7381 		    device_xname(sc->sc_dev),
   7382 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7383 
   7384 		/* Sync the descriptors we're using. */
   7385 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7386 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7387 
   7388 		/* Give the packet to the chip. */
   7389 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7390 
   7391 		DPRINTF(WM_DEBUG_TX,
   7392 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7393 
   7394 		DPRINTF(WM_DEBUG_TX,
   7395 		    ("%s: TX: finished transmitting packet, job %d\n",
   7396 		    device_xname(sc->sc_dev), txq->txq_snext));
   7397 
   7398 		/* Advance the tx pointer. */
   7399 		txq->txq_free -= txs->txs_ndesc;
   7400 		txq->txq_next = nexttx;
   7401 
   7402 		txq->txq_sfree--;
   7403 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7404 
   7405 		/* Pass the packet to any BPF listeners. */
   7406 		bpf_mtap(ifp, m0);
   7407 	}
   7408 
   7409 	if (m0 != NULL) {
   7410 		if (!is_transmit)
   7411 			ifp->if_flags |= IFF_OACTIVE;
   7412 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7413 		WM_Q_EVCNT_INCR(txq, txdrop);
   7414 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7415 			__func__));
   7416 		m_freem(m0);
   7417 	}
   7418 
   7419 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7420 		/* No more slots; notify upper layer. */
   7421 		if (!is_transmit)
   7422 			ifp->if_flags |= IFF_OACTIVE;
   7423 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7424 	}
   7425 
   7426 	if (txq->txq_free != ofree) {
   7427 		/* Set a watchdog timer in case the chip flakes out. */
   7428 		txq->txq_lastsent = time_uptime;
   7429 		txq->txq_watchdog = true;
   7430 	}
   7431 }
   7432 
   7433 /*
   7434  * wm_nq_tx_offload:
   7435  *
   7436  *	Set up TCP/IP checksumming parameters for the
   7437  *	specified packet, for NEWQUEUE devices
   7438  */
   7439 static int
   7440 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7441     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7442 {
   7443 	struct mbuf *m0 = txs->txs_mbuf;
   7444 	uint32_t vl_len, mssidx, cmdc;
   7445 	struct ether_header *eh;
   7446 	int offset, iphl;
   7447 
   7448 	/*
   7449 	 * XXX It would be nice if the mbuf pkthdr had offset
   7450 	 * fields for the protocol headers.
   7451 	 */
   7452 	*cmdlenp = 0;
   7453 	*fieldsp = 0;
   7454 
   7455 	eh = mtod(m0, struct ether_header *);
   7456 	switch (htons(eh->ether_type)) {
   7457 	case ETHERTYPE_IP:
   7458 	case ETHERTYPE_IPV6:
   7459 		offset = ETHER_HDR_LEN;
   7460 		break;
   7461 
   7462 	case ETHERTYPE_VLAN:
   7463 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7464 		break;
   7465 
   7466 	default:
   7467 		/* Don't support this protocol or encapsulation. */
   7468 		*do_csum = false;
   7469 		return 0;
   7470 	}
   7471 	*do_csum = true;
   7472 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7473 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7474 
   7475 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7476 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7477 
   7478 	if ((m0->m_pkthdr.csum_flags &
   7479 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7480 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7481 	} else {
   7482 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7483 	}
   7484 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7485 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7486 
   7487 	if (vlan_has_tag(m0)) {
   7488 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7489 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7490 		*cmdlenp |= NQTX_CMD_VLE;
   7491 	}
   7492 
   7493 	mssidx = 0;
   7494 
   7495 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7496 		int hlen = offset + iphl;
   7497 		int tcp_hlen;
   7498 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7499 
   7500 		if (__predict_false(m0->m_len <
   7501 				    (hlen + sizeof(struct tcphdr)))) {
   7502 			/*
   7503 			 * TCP/IP headers are not in the first mbuf; we need
   7504 			 * to do this the slow and painful way.  Let's just
   7505 			 * hope this doesn't happen very often.
   7506 			 */
   7507 			struct tcphdr th;
   7508 
   7509 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7510 
   7511 			m_copydata(m0, hlen, sizeof(th), &th);
   7512 			if (v4) {
   7513 				struct ip ip;
   7514 
   7515 				m_copydata(m0, offset, sizeof(ip), &ip);
   7516 				ip.ip_len = 0;
   7517 				m_copyback(m0,
   7518 				    offset + offsetof(struct ip, ip_len),
   7519 				    sizeof(ip.ip_len), &ip.ip_len);
   7520 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7521 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7522 			} else {
   7523 				struct ip6_hdr ip6;
   7524 
   7525 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7526 				ip6.ip6_plen = 0;
   7527 				m_copyback(m0,
   7528 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7529 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7530 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7531 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7532 			}
   7533 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7534 			    sizeof(th.th_sum), &th.th_sum);
   7535 
   7536 			tcp_hlen = th.th_off << 2;
   7537 		} else {
   7538 			/*
   7539 			 * TCP/IP headers are in the first mbuf; we can do
   7540 			 * this the easy way.
   7541 			 */
   7542 			struct tcphdr *th;
   7543 
   7544 			if (v4) {
   7545 				struct ip *ip =
   7546 				    (void *)(mtod(m0, char *) + offset);
   7547 				th = (void *)(mtod(m0, char *) + hlen);
   7548 
   7549 				ip->ip_len = 0;
   7550 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7551 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7552 			} else {
   7553 				struct ip6_hdr *ip6 =
   7554 				    (void *)(mtod(m0, char *) + offset);
   7555 				th = (void *)(mtod(m0, char *) + hlen);
   7556 
   7557 				ip6->ip6_plen = 0;
   7558 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7559 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7560 			}
   7561 			tcp_hlen = th->th_off << 2;
   7562 		}
   7563 		hlen += tcp_hlen;
   7564 		*cmdlenp |= NQTX_CMD_TSE;
   7565 
   7566 		if (v4) {
   7567 			WM_Q_EVCNT_INCR(txq, txtso);
   7568 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7569 		} else {
   7570 			WM_Q_EVCNT_INCR(txq, txtso6);
   7571 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7572 		}
   7573 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7574 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7575 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7576 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7577 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7578 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7579 	} else {
   7580 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7581 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7582 	}
   7583 
   7584 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7585 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7586 		cmdc |= NQTXC_CMD_IP4;
   7587 	}
   7588 
   7589 	if (m0->m_pkthdr.csum_flags &
   7590 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7591 		WM_Q_EVCNT_INCR(txq, txtusum);
   7592 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7593 			cmdc |= NQTXC_CMD_TCP;
   7594 		} else {
   7595 			cmdc |= NQTXC_CMD_UDP;
   7596 		}
   7597 		cmdc |= NQTXC_CMD_IP4;
   7598 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7599 	}
   7600 	if (m0->m_pkthdr.csum_flags &
   7601 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7602 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7603 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7604 			cmdc |= NQTXC_CMD_TCP;
   7605 		} else {
   7606 			cmdc |= NQTXC_CMD_UDP;
   7607 		}
   7608 		cmdc |= NQTXC_CMD_IP6;
   7609 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7610 	}
   7611 
   7612 	/*
    7613 	 * We don't have to write a context descriptor for every packet on
    7614 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7615 	 * I210 and I211; writing one per Tx queue is enough for these
    7616 	 * controllers.
    7617 	 * Writing a context descriptor for every packet adds some
    7618 	 * overhead, but it does not cause problems.
   7619 	 */
   7620 	/* Fill in the context descriptor. */
   7621 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7622 	    htole32(vl_len);
   7623 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7624 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7625 	    htole32(cmdc);
   7626 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7627 	    htole32(mssidx);
   7628 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7629 	DPRINTF(WM_DEBUG_TX,
   7630 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7631 	    txq->txq_next, 0, vl_len));
   7632 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7633 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7634 	txs->txs_ndesc++;
   7635 	return 0;
   7636 }
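
         /*
          * A worked example of the vl_len packing above, for an untagged
          * IPv4/TCP packet (offset == 14, iphl == 20), assuming the usual
          * advanced context descriptor layout (IPLEN in the low bits,
          * MACLEN above it, the VLAN tag on top):
          *
          *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
          *	       | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
          *
          * For TSO, the MSS and the L4 header length are packed into
          * mssidx in the same style.
          */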
   7637 
   7638 /*
   7639  * wm_nq_start:		[ifnet interface function]
   7640  *
   7641  *	Start packet transmission on the interface for NEWQUEUE devices
   7642  */
   7643 static void
   7644 wm_nq_start(struct ifnet *ifp)
   7645 {
   7646 	struct wm_softc *sc = ifp->if_softc;
   7647 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7648 
   7649 #ifdef WM_MPSAFE
   7650 	KASSERT(if_is_mpsafe(ifp));
   7651 #endif
   7652 	/*
   7653 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7654 	 */
   7655 
   7656 	mutex_enter(txq->txq_lock);
   7657 	if (!txq->txq_stopping)
   7658 		wm_nq_start_locked(ifp);
   7659 	mutex_exit(txq->txq_lock);
   7660 }
   7661 
   7662 static void
   7663 wm_nq_start_locked(struct ifnet *ifp)
   7664 {
   7665 	struct wm_softc *sc = ifp->if_softc;
   7666 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7667 
   7668 	wm_nq_send_common_locked(ifp, txq, false);
   7669 }
   7670 
   7671 static int
   7672 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7673 {
   7674 	int qid;
   7675 	struct wm_softc *sc = ifp->if_softc;
   7676 	struct wm_txqueue *txq;
   7677 
   7678 	qid = wm_select_txqueue(ifp, m);
   7679 	txq = &sc->sc_queue[qid].wmq_txq;
   7680 
   7681 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7682 		m_freem(m);
   7683 		WM_Q_EVCNT_INCR(txq, txdrop);
   7684 		return ENOBUFS;
   7685 	}
   7686 
   7687 	/*
   7688 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7689 	 */
   7690 	ifp->if_obytes += m->m_pkthdr.len;
   7691 	if (m->m_flags & M_MCAST)
   7692 		ifp->if_omcasts++;
   7693 
   7694 	/*
    7695 	 * There are two situations in which this mutex_tryenter() can
    7696 	 * fail at run time:
    7697 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7698 	 *     (2) contention with the deferred if_start softint
    7699 	 *	   (wm_handle_queue())
    7700 	 * In either case, the last packet enqueued to txq->txq_interq is
    7701 	 * dequeued later by wm_deferred_start_locked(), so it does not
    7702 	 * get stuck.
   7703 	 */
   7704 	if (mutex_tryenter(txq->txq_lock)) {
   7705 		if (!txq->txq_stopping)
   7706 			wm_nq_transmit_locked(ifp, txq);
   7707 		mutex_exit(txq->txq_lock);
   7708 	}
   7709 
   7710 	return 0;
   7711 }
   7712 
   7713 static void
   7714 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7715 {
   7716 
   7717 	wm_nq_send_common_locked(ifp, txq, true);
   7718 }
   7719 
   7720 static void
   7721 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7722     bool is_transmit)
   7723 {
   7724 	struct wm_softc *sc = ifp->if_softc;
   7725 	struct mbuf *m0;
   7726 	struct wm_txsoft *txs;
   7727 	bus_dmamap_t dmamap;
   7728 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7729 	bool do_csum, sent;
   7730 
   7731 	KASSERT(mutex_owned(txq->txq_lock));
   7732 
   7733 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7734 		return;
   7735 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7736 		return;
   7737 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7738 		return;
   7739 
   7740 	sent = false;
   7741 
   7742 	/*
   7743 	 * Loop through the send queue, setting up transmit descriptors
   7744 	 * until we drain the queue, or use up all available transmit
   7745 	 * descriptors.
   7746 	 */
   7747 	for (;;) {
   7748 		m0 = NULL;
   7749 
   7750 		/* Get a work queue entry. */
   7751 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7752 			wm_txeof(txq, UINT_MAX);
   7753 			if (txq->txq_sfree == 0) {
   7754 				DPRINTF(WM_DEBUG_TX,
   7755 				    ("%s: TX: no free job descriptors\n",
   7756 					device_xname(sc->sc_dev)));
   7757 				WM_Q_EVCNT_INCR(txq, txsstall);
   7758 				break;
   7759 			}
   7760 		}
   7761 
   7762 		/* Grab a packet off the queue. */
   7763 		if (is_transmit)
   7764 			m0 = pcq_get(txq->txq_interq);
   7765 		else
   7766 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7767 		if (m0 == NULL)
   7768 			break;
   7769 
   7770 		DPRINTF(WM_DEBUG_TX,
   7771 		    ("%s: TX: have packet to transmit: %p\n",
   7772 		    device_xname(sc->sc_dev), m0));
   7773 
   7774 		txs = &txq->txq_soft[txq->txq_snext];
   7775 		dmamap = txs->txs_dmamap;
   7776 
   7777 		/*
   7778 		 * Load the DMA map.  If this fails, the packet either
   7779 		 * didn't fit in the allotted number of segments, or we
   7780 		 * were short on resources.  For the too-many-segments
   7781 		 * case, we simply report an error and drop the packet,
   7782 		 * since we can't sanely copy a jumbo packet to a single
   7783 		 * buffer.
   7784 		 */
   7785 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7786 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7787 		if (error) {
   7788 			if (error == EFBIG) {
   7789 				WM_Q_EVCNT_INCR(txq, txdrop);
   7790 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7791 				    "DMA segments, dropping...\n",
   7792 				    device_xname(sc->sc_dev));
   7793 				wm_dump_mbuf_chain(sc, m0);
   7794 				m_freem(m0);
   7795 				continue;
   7796 			}
   7797 			/* Short on resources, just stop for now. */
   7798 			DPRINTF(WM_DEBUG_TX,
   7799 			    ("%s: TX: dmamap load failed: %d\n",
   7800 			    device_xname(sc->sc_dev), error));
   7801 			break;
   7802 		}
   7803 
   7804 		segs_needed = dmamap->dm_nsegs;
   7805 
   7806 		/*
   7807 		 * Ensure we have enough descriptors free to describe
   7808 		 * the packet.  Note, we always reserve one descriptor
   7809 		 * at the end of the ring due to the semantics of the
   7810 		 * TDT register, plus one more in the event we need
   7811 		 * to load offload context.
   7812 		 */
   7813 		if (segs_needed > txq->txq_free - 2) {
   7814 			/*
   7815 			 * Not enough free descriptors to transmit this
   7816 			 * packet.  We haven't committed anything yet,
   7817 			 * so just unload the DMA map, put the packet
    7818 			 * back on the queue, and punt.  Notify the upper
   7819 			 * layer that there are no more slots left.
   7820 			 */
   7821 			DPRINTF(WM_DEBUG_TX,
   7822 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7823 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7824 			    segs_needed, txq->txq_free - 1));
   7825 			if (!is_transmit)
   7826 				ifp->if_flags |= IFF_OACTIVE;
   7827 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7828 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7829 			WM_Q_EVCNT_INCR(txq, txdstall);
   7830 			break;
   7831 		}
   7832 
   7833 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7834 
   7835 		DPRINTF(WM_DEBUG_TX,
   7836 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7837 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7838 
   7839 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7840 
   7841 		/*
   7842 		 * Store a pointer to the packet so that we can free it
   7843 		 * later.
   7844 		 *
   7845 		 * Initially, we consider the number of descriptors the
   7846 		 * packet uses the number of DMA segments.  This may be
   7847 		 * incremented by 1 if we do checksum offload (a descriptor
   7848 		 * is used to set the checksum context).
   7849 		 */
   7850 		txs->txs_mbuf = m0;
   7851 		txs->txs_firstdesc = txq->txq_next;
   7852 		txs->txs_ndesc = segs_needed;
   7853 
   7854 		/* Set up offload parameters for this packet. */
   7855 		uint32_t cmdlen, fields, dcmdlen;
   7856 		if (m0->m_pkthdr.csum_flags &
   7857 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7858 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7859 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7860 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7861 			    &do_csum) != 0) {
   7862 				/* Error message already displayed. */
   7863 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7864 				continue;
   7865 			}
   7866 		} else {
   7867 			do_csum = false;
   7868 			cmdlen = 0;
   7869 			fields = 0;
   7870 		}
   7871 
   7872 		/* Sync the DMA map. */
   7873 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7874 		    BUS_DMASYNC_PREWRITE);
   7875 
   7876 		/* Initialize the first transmit descriptor. */
   7877 		nexttx = txq->txq_next;
   7878 		if (!do_csum) {
   7879 			/* setup a legacy descriptor */
   7880 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7881 			    dmamap->dm_segs[0].ds_addr);
   7882 			txq->txq_descs[nexttx].wtx_cmdlen =
   7883 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7884 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7885 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7886 			if (vlan_has_tag(m0)) {
   7887 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7888 				    htole32(WTX_CMD_VLE);
   7889 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7890 				    htole16(vlan_get_tag(m0));
   7891 			} else {
   7892 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7893 			}
   7894 			dcmdlen = 0;
   7895 		} else {
   7896 			/* setup an advanced data descriptor */
   7897 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7898 			    htole64(dmamap->dm_segs[0].ds_addr);
   7899 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7900 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7902 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7903 			    htole32(fields);
   7904 			DPRINTF(WM_DEBUG_TX,
   7905 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7906 			    device_xname(sc->sc_dev), nexttx,
   7907 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7908 			DPRINTF(WM_DEBUG_TX,
   7909 			    ("\t 0x%08x%08x\n", fields,
   7910 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7911 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7912 		}
   7913 
   7914 		lasttx = nexttx;
   7915 		nexttx = WM_NEXTTX(txq, nexttx);
   7916 		/*
		 * Fill in the next descriptors. The legacy and advanced
		 * formats are the same from here on.
   7919 		 */
   7920 		for (seg = 1; seg < dmamap->dm_nsegs;
   7921 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7922 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7923 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7924 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7925 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7926 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7927 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7928 			lasttx = nexttx;
   7929 
   7930 			DPRINTF(WM_DEBUG_TX,
   7931 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7932 			     "len %#04zx\n",
   7933 			    device_xname(sc->sc_dev), nexttx,
   7934 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7935 			    dmamap->dm_segs[seg].ds_len));
   7936 		}
   7937 
   7938 		KASSERT(lasttx != -1);
   7939 
   7940 		/*
   7941 		 * Set up the command byte on the last descriptor of
   7942 		 * the packet.  If we're in the interrupt delay window,
   7943 		 * delay the interrupt.
   7944 		 */
   7945 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7946 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7947 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7948 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7949 
   7950 		txs->txs_lastdesc = lasttx;
   7951 
   7952 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7953 		    device_xname(sc->sc_dev),
   7954 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7955 
   7956 		/* Sync the descriptors we're using. */
   7957 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7958 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7959 
   7960 		/* Give the packet to the chip. */
   7961 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7962 		sent = true;
   7963 
   7964 		DPRINTF(WM_DEBUG_TX,
   7965 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7966 
   7967 		DPRINTF(WM_DEBUG_TX,
   7968 		    ("%s: TX: finished transmitting packet, job %d\n",
   7969 		    device_xname(sc->sc_dev), txq->txq_snext));
   7970 
   7971 		/* Advance the tx pointer. */
   7972 		txq->txq_free -= txs->txs_ndesc;
   7973 		txq->txq_next = nexttx;
   7974 
   7975 		txq->txq_sfree--;
   7976 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7977 
   7978 		/* Pass the packet to any BPF listeners. */
   7979 		bpf_mtap(ifp, m0);
   7980 	}
   7981 
   7982 	if (m0 != NULL) {
   7983 		if (!is_transmit)
   7984 			ifp->if_flags |= IFF_OACTIVE;
   7985 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7986 		WM_Q_EVCNT_INCR(txq, txdrop);
   7987 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7988 			__func__));
   7989 		m_freem(m0);
   7990 	}
   7991 
   7992 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7993 		/* No more slots; notify upper layer. */
   7994 		if (!is_transmit)
   7995 			ifp->if_flags |= IFF_OACTIVE;
   7996 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7997 	}
   7998 
   7999 	if (sent) {
   8000 		/* Set a watchdog timer in case the chip flakes out. */
   8001 		txq->txq_lastsent = time_uptime;
   8002 		txq->txq_watchdog = true;
   8003 	}
   8004 }
   8005 
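/*
 * wm_deferred_start_locked:
 *
 *	Restart packet transmission from softint context after wm_txeof()
 *	has reclaimed descriptors.  Called with txq_lock held.
 */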
   8006 static void
   8007 wm_deferred_start_locked(struct wm_txqueue *txq)
   8008 {
   8009 	struct wm_softc *sc = txq->txq_sc;
   8010 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8011 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8012 	int qid = wmq->wmq_id;
   8013 
   8014 	KASSERT(mutex_owned(txq->txq_lock));
   8015 
	if (txq->txq_stopping)
		return;
   8020 
   8021 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX Needed for ALTQ or single-CPU systems */
   8023 		if (qid == 0)
   8024 			wm_nq_start_locked(ifp);
   8025 		wm_nq_transmit_locked(ifp, txq);
   8026 	} else {
		/* XXX Needed for ALTQ or single-CPU systems */
   8028 		if (qid == 0)
   8029 			wm_start_locked(ifp);
   8030 		wm_transmit_locked(ifp, txq);
   8031 	}
   8032 }
   8033 
   8034 /* Interrupt */
   8035 
   8036 /*
   8037  * wm_txeof:
   8038  *
   8039  *	Helper; handle transmit interrupts.
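 *
 *	Reclaim at most `limit' completed Tx jobs and return true if the
 *	limit was reached with work still pending.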
   8040  */
   8041 static bool
   8042 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8043 {
   8044 	struct wm_softc *sc = txq->txq_sc;
   8045 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8046 	struct wm_txsoft *txs;
   8047 	int count = 0;
   8048 	int i;
   8049 	uint8_t status;
   8050 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8051 	bool more = false;
   8052 
   8053 	KASSERT(mutex_owned(txq->txq_lock));
   8054 
   8055 	if (txq->txq_stopping)
   8056 		return false;
   8057 
   8058 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8060 	if (wmq->wmq_id == 0)
   8061 		ifp->if_flags &= ~IFF_OACTIVE;
   8062 
   8063 	/*
   8064 	 * Go through the Tx list and free mbufs for those
   8065 	 * frames which have been transmitted.
   8066 	 */
   8067 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8068 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8069 		if (limit-- == 0) {
   8070 			more = true;
   8071 			DPRINTF(WM_DEBUG_TX,
   8072 			    ("%s: TX: loop limited, job %d is not processed\n",
   8073 				device_xname(sc->sc_dev), i));
   8074 			break;
   8075 		}
   8076 
   8077 		txs = &txq->txq_soft[i];
   8078 
   8079 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8080 			device_xname(sc->sc_dev), i));
   8081 
   8082 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8083 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8084 
   8085 		status =
   8086 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8087 		if ((status & WTX_ST_DD) == 0) {
   8088 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8089 			    BUS_DMASYNC_PREREAD);
   8090 			break;
   8091 		}
   8092 
   8093 		count++;
   8094 		DPRINTF(WM_DEBUG_TX,
   8095 		    ("%s: TX: job %d done: descs %d..%d\n",
   8096 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8097 		    txs->txs_lastdesc));
   8098 
   8099 		/*
   8100 		 * XXX We should probably be using the statistics
   8101 		 * XXX registers, but I don't know if they exist
   8102 		 * XXX on chips before the i82544.
   8103 		 */
   8104 
   8105 #ifdef WM_EVENT_COUNTERS
   8106 		if (status & WTX_ST_TU)
   8107 			WM_Q_EVCNT_INCR(txq, tu);
   8108 #endif /* WM_EVENT_COUNTERS */
   8109 
   8110 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8111 			ifp->if_oerrors++;
   8112 			if (status & WTX_ST_LC)
   8113 				log(LOG_WARNING, "%s: late collision\n",
   8114 				    device_xname(sc->sc_dev));
   8115 			else if (status & WTX_ST_EC) {
   8116 				ifp->if_collisions += 16;
   8117 				log(LOG_WARNING, "%s: excessive collisions\n",
   8118 				    device_xname(sc->sc_dev));
   8119 			}
   8120 		} else
   8121 			ifp->if_opackets++;
   8122 
   8123 		txq->txq_packets++;
   8124 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8125 
   8126 		txq->txq_free += txs->txs_ndesc;
   8127 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8128 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8129 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8130 		m_freem(txs->txs_mbuf);
   8131 		txs->txs_mbuf = NULL;
   8132 	}
   8133 
   8134 	/* Update the dirty transmit buffer pointer. */
   8135 	txq->txq_sdirty = i;
   8136 	DPRINTF(WM_DEBUG_TX,
   8137 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8138 
   8139 	if (count != 0)
   8140 		rnd_add_uint32(&sc->rnd_source, count);
   8141 
   8142 	/*
   8143 	 * If there are no more pending transmissions, cancel the watchdog
   8144 	 * timer.
   8145 	 */
   8146 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8147 		txq->txq_watchdog = false;
   8148 
   8149 	return more;
   8150 }
   8151 
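/*
 * The wm_rxdesc_get_*() inline accessors below hide the differences
 * between the three Rx descriptor formats: the 82574 extended format,
 * the advanced format used when WM_F_NEWQUEUE is set, and the legacy
 * format used by all other chips.
 */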
   8152 static inline uint32_t
   8153 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8154 {
   8155 	struct wm_softc *sc = rxq->rxq_sc;
   8156 
   8157 	if (sc->sc_type == WM_T_82574)
   8158 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8159 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8160 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8161 	else
   8162 		return rxq->rxq_descs[idx].wrx_status;
   8163 }
   8164 
   8165 static inline uint32_t
   8166 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8167 {
   8168 	struct wm_softc *sc = rxq->rxq_sc;
   8169 
   8170 	if (sc->sc_type == WM_T_82574)
   8171 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8172 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8173 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8174 	else
   8175 		return rxq->rxq_descs[idx].wrx_errors;
   8176 }
   8177 
   8178 static inline uint16_t
   8179 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8180 {
   8181 	struct wm_softc *sc = rxq->rxq_sc;
   8182 
   8183 	if (sc->sc_type == WM_T_82574)
   8184 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8185 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8186 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8187 	else
   8188 		return rxq->rxq_descs[idx].wrx_special;
   8189 }
   8190 
   8191 static inline int
   8192 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8193 {
   8194 	struct wm_softc *sc = rxq->rxq_sc;
   8195 
   8196 	if (sc->sc_type == WM_T_82574)
   8197 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8198 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8199 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8200 	else
   8201 		return rxq->rxq_descs[idx].wrx_len;
   8202 }
   8203 
   8204 #ifdef WM_DEBUG
   8205 static inline uint32_t
   8206 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8207 {
   8208 	struct wm_softc *sc = rxq->rxq_sc;
   8209 
   8210 	if (sc->sc_type == WM_T_82574)
   8211 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8212 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8213 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8214 	else
   8215 		return 0;
   8216 }
   8217 
   8218 static inline uint8_t
   8219 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8220 {
   8221 	struct wm_softc *sc = rxq->rxq_sc;
   8222 
   8223 	if (sc->sc_type == WM_T_82574)
   8224 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8225 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8226 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8227 	else
   8228 		return 0;
   8229 }
   8230 #endif /* WM_DEBUG */
   8231 
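/*
 * Likewise, wm_rxdesc_is_set_status() and wm_rxdesc_is_set_error() test
 * a bit using whichever of the legacy, extended or advanced bit
 * definitions applies to this chip.
 */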
   8232 static inline bool
   8233 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8234     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8235 {
   8236 
   8237 	if (sc->sc_type == WM_T_82574)
   8238 		return (status & ext_bit) != 0;
   8239 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8240 		return (status & nq_bit) != 0;
   8241 	else
   8242 		return (status & legacy_bit) != 0;
   8243 }
   8244 
   8245 static inline bool
   8246 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8247     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8248 {
   8249 
   8250 	if (sc->sc_type == WM_T_82574)
   8251 		return (error & ext_bit) != 0;
   8252 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8253 		return (error & nq_bit) != 0;
   8254 	else
   8255 		return (error & legacy_bit) != 0;
   8256 }
   8257 
   8258 static inline bool
   8259 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8260 {
   8261 

	return wm_rxdesc_is_set_status(rxq->rxq_sc, status,
	    WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP);
   8267 }
   8268 
   8269 static inline bool
   8270 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8271 {
   8272 	struct wm_softc *sc = rxq->rxq_sc;
   8273 
	/* XXX Missing error bit for newqueue? */
   8275 	if (wm_rxdesc_is_set_error(sc, errors,
   8276 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8277 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8278 		NQRXC_ERROR_RXE)) {
   8279 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8280 			log(LOG_WARNING, "%s: symbol error\n",
   8281 			    device_xname(sc->sc_dev));
   8282 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8283 			log(LOG_WARNING, "%s: receive sequence error\n",
   8284 			    device_xname(sc->sc_dev));
   8285 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8286 			log(LOG_WARNING, "%s: CRC error\n",
   8287 			    device_xname(sc->sc_dev));
   8288 		return true;
   8289 	}
   8290 
   8291 	return false;
   8292 }
   8293 
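/*
 * wm_rxdesc_dd:
 *
 *	Check the descriptor-done (DD) bit of a receive descriptor; if it
 *	is clear, we have caught up with the hardware, so re-sync the
 *	descriptor for the next poll and return false.
 */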
   8294 static inline bool
   8295 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8296 {
   8297 	struct wm_softc *sc = rxq->rxq_sc;
   8298 
   8299 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8300 		NQRXC_STATUS_DD)) {
   8301 		/* We have processed all of the receive descriptors. */
   8302 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8303 		return false;
   8304 	}
   8305 
   8306 	return true;
   8307 }
   8308 
   8309 static inline bool
   8310 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8311     struct mbuf *m)
   8312 {
   8313 
   8314 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8315 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8316 		vlan_set_tag(m, le16toh(vlantag));
   8317 	}
   8318 
   8319 	return true;
   8320 }
   8321 
   8322 static inline void
   8323 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8324     uint32_t errors, struct mbuf *m)
   8325 {
   8326 	struct wm_softc *sc = rxq->rxq_sc;
   8327 
   8328 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8329 		if (wm_rxdesc_is_set_status(sc, status,
   8330 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8331 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8332 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8333 			if (wm_rxdesc_is_set_error(sc, errors,
   8334 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8335 				m->m_pkthdr.csum_flags |=
   8336 					M_CSUM_IPv4_BAD;
   8337 		}
   8338 		if (wm_rxdesc_is_set_status(sc, status,
   8339 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8340 			/*
   8341 			 * Note: we don't know if this was TCP or UDP,
   8342 			 * so we just set both bits, and expect the
   8343 			 * upper layers to deal.
   8344 			 */
   8345 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8346 			m->m_pkthdr.csum_flags |=
   8347 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8348 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8349 			if (wm_rxdesc_is_set_error(sc, errors,
   8350 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8351 				m->m_pkthdr.csum_flags |=
   8352 					M_CSUM_TCP_UDP_BAD;
   8353 		}
   8354 	}
   8355 }
   8356 
   8357 /*
   8358  * wm_rxeof:
   8359  *
   8360  *	Helper; handle receive interrupts.
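 *
 *	Process at most `limit' received frames and return true if the
 *	limit was reached with descriptors still pending.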
   8361  */
   8362 static bool
   8363 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8364 {
   8365 	struct wm_softc *sc = rxq->rxq_sc;
   8366 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8367 	struct wm_rxsoft *rxs;
   8368 	struct mbuf *m;
   8369 	int i, len;
   8370 	int count = 0;
   8371 	uint32_t status, errors;
   8372 	uint16_t vlantag;
   8373 	bool more = false;
   8374 
   8375 	KASSERT(mutex_owned(rxq->rxq_lock));
   8376 
   8377 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8378 		if (limit-- == 0) {
   8379 			rxq->rxq_ptr = i;
   8380 			more = true;
   8381 			DPRINTF(WM_DEBUG_RX,
   8382 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8383 				device_xname(sc->sc_dev), i));
   8384 			break;
   8385 		}
   8386 
   8387 		rxs = &rxq->rxq_soft[i];
   8388 
   8389 		DPRINTF(WM_DEBUG_RX,
   8390 		    ("%s: RX: checking descriptor %d\n",
   8391 		    device_xname(sc->sc_dev), i));
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8393 
   8394 		status = wm_rxdesc_get_status(rxq, i);
   8395 		errors = wm_rxdesc_get_errors(rxq, i);
   8396 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8397 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8398 #ifdef WM_DEBUG
   8399 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8400 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8401 #endif
   8402 
   8403 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8404 			/*
			 * Update the receive pointer while holding rxq_lock,
			 * consistent with the counter increment.
   8407 			 */
   8408 			rxq->rxq_ptr = i;
   8409 			break;
   8410 		}
   8411 
   8412 		count++;
   8413 		if (__predict_false(rxq->rxq_discard)) {
   8414 			DPRINTF(WM_DEBUG_RX,
   8415 			    ("%s: RX: discarding contents of descriptor %d\n",
   8416 			    device_xname(sc->sc_dev), i));
   8417 			wm_init_rxdesc(rxq, i);
   8418 			if (wm_rxdesc_is_eop(rxq, status)) {
   8419 				/* Reset our state. */
   8420 				DPRINTF(WM_DEBUG_RX,
   8421 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8422 				    device_xname(sc->sc_dev)));
   8423 				rxq->rxq_discard = 0;
   8424 			}
   8425 			continue;
   8426 		}
   8427 
   8428 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8429 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8430 
   8431 		m = rxs->rxs_mbuf;
   8432 
   8433 		/*
   8434 		 * Add a new receive buffer to the ring, unless of
   8435 		 * course the length is zero. Treat the latter as a
   8436 		 * failed mapping.
   8437 		 */
   8438 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8439 			/*
   8440 			 * Failed, throw away what we've done so
   8441 			 * far, and discard the rest of the packet.
   8442 			 */
   8443 			ifp->if_ierrors++;
   8444 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8445 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8446 			wm_init_rxdesc(rxq, i);
   8447 			if (!wm_rxdesc_is_eop(rxq, status))
   8448 				rxq->rxq_discard = 1;
   8449 			if (rxq->rxq_head != NULL)
   8450 				m_freem(rxq->rxq_head);
   8451 			WM_RXCHAIN_RESET(rxq);
   8452 			DPRINTF(WM_DEBUG_RX,
   8453 			    ("%s: RX: Rx buffer allocation failed, "
   8454 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8455 			    rxq->rxq_discard ? " (discard)" : ""));
   8456 			continue;
   8457 		}
   8458 
   8459 		m->m_len = len;
   8460 		rxq->rxq_len += len;
   8461 		DPRINTF(WM_DEBUG_RX,
   8462 		    ("%s: RX: buffer at %p len %d\n",
   8463 		    device_xname(sc->sc_dev), m->m_data, len));
   8464 
   8465 		/* If this is not the end of the packet, keep looking. */
   8466 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8467 			WM_RXCHAIN_LINK(rxq, m);
   8468 			DPRINTF(WM_DEBUG_RX,
   8469 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8470 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8471 			continue;
   8472 		}
   8473 
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on I350, I354 and
		 * I21[01] (not all chips can be configured to strip it),
		 * so we need to trim it.  We may also need to adjust the
		 * length of the previous mbuf in the chain if the current
		 * mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on I350, so we don't trim the
		 * FCS there.
		 */
   8484 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8485 		    && (sc->sc_type != WM_T_I210)
   8486 		    && (sc->sc_type != WM_T_I211)) {
   8487 			if (m->m_len < ETHER_CRC_LEN) {
   8488 				rxq->rxq_tail->m_len
   8489 				    -= (ETHER_CRC_LEN - m->m_len);
   8490 				m->m_len = 0;
   8491 			} else
   8492 				m->m_len -= ETHER_CRC_LEN;
   8493 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8494 		} else
   8495 			len = rxq->rxq_len;
   8496 
   8497 		WM_RXCHAIN_LINK(rxq, m);
   8498 
   8499 		*rxq->rxq_tailp = NULL;
   8500 		m = rxq->rxq_head;
   8501 
   8502 		WM_RXCHAIN_RESET(rxq);
   8503 
   8504 		DPRINTF(WM_DEBUG_RX,
   8505 		    ("%s: RX: have entire packet, len -> %d\n",
   8506 		    device_xname(sc->sc_dev), len));
   8507 
   8508 		/* If an error occurred, update stats and drop the packet. */
   8509 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8510 			m_freem(m);
   8511 			continue;
   8512 		}
   8513 
   8514 		/* No errors.  Receive the packet. */
   8515 		m_set_rcvif(m, ifp);
   8516 		m->m_pkthdr.len = len;
   8517 		/*
   8518 		 * TODO
		 * We should save the rsshash and rsstype in this mbuf.
   8520 		 */
   8521 		DPRINTF(WM_DEBUG_RX,
   8522 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8523 			device_xname(sc->sc_dev), rsstype, rsshash));
   8524 
   8525 		/*
   8526 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8527 		 * for us.  Associate the tag with the packet.
   8528 		 */
   8529 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8530 			continue;
   8531 
   8532 		/* Set up checksum info for this packet. */
   8533 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8534 		/*
		 * Update the receive pointer while holding rxq_lock,
		 * consistent with the counter increment.
   8537 		 */
   8538 		rxq->rxq_ptr = i;
   8539 		rxq->rxq_packets++;
   8540 		rxq->rxq_bytes += len;
   8541 		mutex_exit(rxq->rxq_lock);
   8542 
   8543 		/* Pass it on. */
   8544 		if_percpuq_enqueue(sc->sc_ipq, m);
   8545 
   8546 		mutex_enter(rxq->rxq_lock);
   8547 
   8548 		if (rxq->rxq_stopping)
   8549 			break;
   8550 	}
   8551 
   8552 	if (count != 0)
   8553 		rnd_add_uint32(&sc->rnd_source, count);
   8554 
   8555 	DPRINTF(WM_DEBUG_RX,
   8556 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8557 
   8558 	return more;
   8559 }
   8560 
   8561 /*
   8562  * wm_linkintr_gmii:
   8563  *
   8564  *	Helper; handle link interrupts for GMII.
   8565  */
   8566 static void
   8567 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8568 {
   8569 
   8570 	KASSERT(WM_CORE_LOCKED(sc));
   8571 
   8572 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8573 		__func__));
   8574 
   8575 	if (icr & ICR_LSC) {
   8576 		uint32_t reg;
   8577 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8578 
   8579 		if ((status & STATUS_LU) != 0) {
   8580 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8581 				device_xname(sc->sc_dev),
   8582 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8583 		} else {
   8584 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8585 				device_xname(sc->sc_dev)));
   8586 		}
   8587 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8588 			wm_gig_downshift_workaround_ich8lan(sc);
   8589 
   8590 		if ((sc->sc_type == WM_T_ICH8)
   8591 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8592 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8593 		}
   8594 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8595 			device_xname(sc->sc_dev)));
   8596 		mii_pollstat(&sc->sc_mii);
   8597 		if (sc->sc_type == WM_T_82543) {
   8598 			int miistatus, active;
   8599 
   8600 			/*
   8601 			 * With 82543, we need to force speed and
   8602 			 * duplex on the MAC equal to what the PHY
   8603 			 * speed and duplex configuration is.
   8604 			 */
   8605 			miistatus = sc->sc_mii.mii_media_status;
   8606 
   8607 			if (miistatus & IFM_ACTIVE) {
   8608 				active = sc->sc_mii.mii_media_active;
   8609 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8610 				switch (IFM_SUBTYPE(active)) {
   8611 				case IFM_10_T:
   8612 					sc->sc_ctrl |= CTRL_SPEED_10;
   8613 					break;
   8614 				case IFM_100_TX:
   8615 					sc->sc_ctrl |= CTRL_SPEED_100;
   8616 					break;
   8617 				case IFM_1000_T:
   8618 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8619 					break;
   8620 				default:
   8621 					/*
   8622 					 * fiber?
					 * Should not enter here.
   8624 					 */
   8625 					printf("unknown media (%x)\n", active);
   8626 					break;
   8627 				}
   8628 				if (active & IFM_FDX)
   8629 					sc->sc_ctrl |= CTRL_FD;
   8630 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8631 			}
   8632 		} else if (sc->sc_type == WM_T_PCH) {
   8633 			wm_k1_gig_workaround_hv(sc,
   8634 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8635 		}
   8636 
   8637 		if ((sc->sc_phytype == WMPHY_82578)
   8638 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8639 			== IFM_1000_T)) {
   8640 
   8641 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8642 				delay(200*1000); /* XXX too big */
   8643 
   8644 				/* Link stall fix for link up */
   8645 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8646 				    HV_MUX_DATA_CTRL,
   8647 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8648 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8649 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8650 				    HV_MUX_DATA_CTRL,
   8651 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8652 			}
   8653 		}
   8654 		/*
   8655 		 * I217 Packet Loss issue:
		 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
		 * on power up.
		 * Set the Beacon Duration for I217 to 8 usec.
   8659 		 */
   8660 		if ((sc->sc_type == WM_T_PCH_LPT)
   8661 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8662 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8663 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8664 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8665 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8666 		}
   8667 
   8668 		/* XXX Work-around I218 hang issue */
   8669 		/* e1000_k1_workaround_lpt_lp() */
   8670 
   8671 		if ((sc->sc_type == WM_T_PCH_LPT)
   8672 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8673 			/*
   8674 			 * Set platform power management values for Latency
   8675 			 * Tolerance Reporting (LTR)
   8676 			 */
   8677 			wm_platform_pm_pch_lpt(sc,
   8678 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8679 				    != 0));
   8680 		}
   8681 
   8682 		/* FEXTNVM6 K1-off workaround */
   8683 		if (sc->sc_type == WM_T_PCH_SPT) {
   8684 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8685 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8686 			    & FEXTNVM6_K1_OFF_ENABLE)
   8687 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8688 			else
   8689 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8690 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8691 		}
   8692 	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8694 			device_xname(sc->sc_dev)));
   8695 	}
   8696 }
   8697 
   8698 /*
   8699  * wm_linkintr_tbi:
   8700  *
   8701  *	Helper; handle link interrupts for TBI mode.
   8702  */
   8703 static void
   8704 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8705 {
   8706 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8707 	uint32_t status;
   8708 
   8709 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8710 		__func__));
   8711 
   8712 	status = CSR_READ(sc, WMREG_STATUS);
   8713 	if (icr & ICR_LSC) {
   8714 		if (status & STATUS_LU) {
   8715 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8716 			    device_xname(sc->sc_dev),
   8717 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8718 			/*
   8719 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8720 			 * so we should update sc->sc_ctrl
   8721 			 */
   8722 
   8723 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8724 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8725 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8726 			if (status & STATUS_FD)
   8727 				sc->sc_tctl |=
   8728 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8729 			else
   8730 				sc->sc_tctl |=
   8731 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8732 			if (sc->sc_ctrl & CTRL_TFCE)
   8733 				sc->sc_fcrtl |= FCRTL_XONE;
   8734 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8735 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8736 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8737 				      sc->sc_fcrtl);
   8738 			sc->sc_tbi_linkup = 1;
   8739 			if_link_state_change(ifp, LINK_STATE_UP);
   8740 		} else {
   8741 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8742 			    device_xname(sc->sc_dev)));
   8743 			sc->sc_tbi_linkup = 0;
   8744 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8745 		}
   8746 		/* Update LED */
   8747 		wm_tbi_serdes_set_linkled(sc);
   8748 	} else if (icr & ICR_RXSEQ) {
   8749 		DPRINTF(WM_DEBUG_LINK,
   8750 		    ("%s: LINK: Receive sequence error\n",
   8751 		    device_xname(sc->sc_dev)));
   8752 	}
   8753 }
   8754 
   8755 /*
   8756  * wm_linkintr_serdes:
   8757  *
 *	Helper; handle link interrupts for SERDES mode.
   8759  */
   8760 static void
   8761 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8762 {
   8763 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8764 	struct mii_data *mii = &sc->sc_mii;
   8765 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8766 	uint32_t pcs_adv, pcs_lpab, reg;
   8767 
   8768 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8769 		__func__));
   8770 
   8771 	if (icr & ICR_LSC) {
   8772 		/* Check PCS */
   8773 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8774 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8775 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8776 				device_xname(sc->sc_dev)));
   8777 			mii->mii_media_status |= IFM_ACTIVE;
   8778 			sc->sc_tbi_linkup = 1;
   8779 			if_link_state_change(ifp, LINK_STATE_UP);
   8780 		} else {
   8781 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8782 				device_xname(sc->sc_dev)));
   8783 			mii->mii_media_status |= IFM_NONE;
   8784 			sc->sc_tbi_linkup = 0;
   8785 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8786 			wm_tbi_serdes_set_linkled(sc);
   8787 			return;
   8788 		}
   8789 		mii->mii_media_active |= IFM_1000_SX;
   8790 		if ((reg & PCS_LSTS_FDX) != 0)
   8791 			mii->mii_media_active |= IFM_FDX;
   8792 		else
   8793 			mii->mii_media_active |= IFM_HDX;
   8794 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8795 			/* Check flow */
   8796 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8797 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8798 				DPRINTF(WM_DEBUG_LINK,
   8799 				    ("XXX LINKOK but not ACOMP\n"));
   8800 				return;
   8801 			}
   8802 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8803 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8804 			DPRINTF(WM_DEBUG_LINK,
   8805 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8806 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8807 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8808 				mii->mii_media_active |= IFM_FLOW
   8809 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8810 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8811 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8812 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8813 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8814 				mii->mii_media_active |= IFM_FLOW
   8815 				    | IFM_ETH_TXPAUSE;
   8816 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8817 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8818 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8819 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8820 				mii->mii_media_active |= IFM_FLOW
   8821 				    | IFM_ETH_RXPAUSE;
   8822 		}
   8823 		/* Update LED */
   8824 		wm_tbi_serdes_set_linkled(sc);
   8825 	} else {
   8826 		DPRINTF(WM_DEBUG_LINK,
   8827 		    ("%s: LINK: Receive sequence error\n",
   8828 		    device_xname(sc->sc_dev)));
   8829 	}
   8830 }
   8831 
   8832 /*
   8833  * wm_linkintr:
   8834  *
   8835  *	Helper; handle link interrupts.
   8836  */
   8837 static void
   8838 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8839 {
   8840 
   8841 	KASSERT(WM_CORE_LOCKED(sc));
   8842 
   8843 	if (sc->sc_flags & WM_F_HAS_MII)
   8844 		wm_linkintr_gmii(sc, icr);
   8845 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8846 	    && (sc->sc_type >= WM_T_82575))
   8847 		wm_linkintr_serdes(sc, icr);
   8848 	else
   8849 		wm_linkintr_tbi(sc, icr);
   8850 }
   8851 
   8852 /*
   8853  * wm_intr_legacy:
   8854  *
   8855  *	Interrupt service routine for INTx and MSI.
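 *
 *	All Tx, Rx and link events share this single vector; follow-up
 *	work is deferred to the per-queue softint.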
   8856  */
   8857 static int
   8858 wm_intr_legacy(void *arg)
   8859 {
   8860 	struct wm_softc *sc = arg;
   8861 	struct wm_queue *wmq = &sc->sc_queue[0];
   8862 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8863 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8864 	uint32_t icr, rndval = 0;
   8865 	int handled = 0;
   8866 
   8867 	while (1 /* CONSTCOND */) {
   8868 		icr = CSR_READ(sc, WMREG_ICR);
   8869 		if ((icr & sc->sc_icr) == 0)
   8870 			break;
   8871 		if (handled == 0) {
   8872 			DPRINTF(WM_DEBUG_TX,
			    ("%s: INTx: got intr\n",
				device_xname(sc->sc_dev)));
   8874 		}
   8875 		if (rndval == 0)
   8876 			rndval = icr;
   8877 
   8878 		mutex_enter(rxq->rxq_lock);
   8879 
   8880 		if (rxq->rxq_stopping) {
   8881 			mutex_exit(rxq->rxq_lock);
   8882 			break;
   8883 		}
   8884 
   8885 		handled = 1;
   8886 
   8887 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8888 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8889 			DPRINTF(WM_DEBUG_RX,
   8890 			    ("%s: RX: got Rx intr 0x%08x\n",
   8891 			    device_xname(sc->sc_dev),
   8892 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8893 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8894 		}
   8895 #endif
   8896 		/*
   8897 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
   8899 		 * So, we can call wm_rxeof() in interrupt context.
   8900 		 */
   8901 		wm_rxeof(rxq, UINT_MAX);
   8902 
   8903 		mutex_exit(rxq->rxq_lock);
   8904 		mutex_enter(txq->txq_lock);
   8905 
   8906 		if (txq->txq_stopping) {
   8907 			mutex_exit(txq->txq_lock);
   8908 			break;
   8909 		}
   8910 
   8911 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8912 		if (icr & ICR_TXDW) {
   8913 			DPRINTF(WM_DEBUG_TX,
   8914 			    ("%s: TX: got TXDW interrupt\n",
   8915 			    device_xname(sc->sc_dev)));
   8916 			WM_Q_EVCNT_INCR(txq, txdw);
   8917 		}
   8918 #endif
   8919 		wm_txeof(txq, UINT_MAX);
   8920 
   8921 		mutex_exit(txq->txq_lock);
   8922 		WM_CORE_LOCK(sc);
   8923 
   8924 		if (sc->sc_core_stopping) {
   8925 			WM_CORE_UNLOCK(sc);
   8926 			break;
   8927 		}
   8928 
   8929 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8930 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8931 			wm_linkintr(sc, icr);
   8932 		}
   8933 
   8934 		WM_CORE_UNLOCK(sc);
   8935 
   8936 		if (icr & ICR_RXO) {
   8937 #if defined(WM_DEBUG)
   8938 			log(LOG_WARNING, "%s: Receive overrun\n",
   8939 			    device_xname(sc->sc_dev));
   8940 #endif /* defined(WM_DEBUG) */
   8941 		}
   8942 	}
   8943 
   8944 	rnd_add_uint32(&sc->rnd_source, rndval);
   8945 
   8946 	if (handled) {
   8947 		/* Try to get more packets going. */
   8948 		softint_schedule(wmq->wmq_si);
   8949 	}
   8950 
   8951 	return handled;
   8952 }
   8953 
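/*
 * wm_txrxintr_disable:
 *
 *	Mask this queue's Tx/Rx interrupts while the queue is being
 *	serviced: via IMC on the 82574, via EIMC with per-queue EITR
 *	bits on the 82575, and via EIMC with the queue's MSI-X vector
 *	bit on newer chips.
 */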
   8954 static inline void
   8955 wm_txrxintr_disable(struct wm_queue *wmq)
   8956 {
   8957 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8958 
   8959 	if (sc->sc_type == WM_T_82574)
   8960 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8961 	else if (sc->sc_type == WM_T_82575)
   8962 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8963 	else
   8964 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8965 }
   8966 
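/*
 * wm_txrxintr_enable:
 *
 *	Recalculate the interrupt throttling parameters and unmask this
 *	queue's Tx/Rx interrupts again (plus ICR_OTHER on the 82574).
 */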
   8967 static inline void
   8968 wm_txrxintr_enable(struct wm_queue *wmq)
   8969 {
   8970 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8971 
   8972 	wm_itrs_calculate(sc, wmq);
   8973 
   8974 	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
	 * here.  There is no need to care whether RXQ(0) or RXQ(1) enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
	 * while its wm_handle_queue(wmq) is running.
   8979 	 */
   8980 	if (sc->sc_type == WM_T_82574)
   8981 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   8982 	else if (sc->sc_type == WM_T_82575)
   8983 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8984 	else
   8985 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8986 }
   8987 
   8988 static int
   8989 wm_txrxintr_msix(void *arg)
   8990 {
   8991 	struct wm_queue *wmq = arg;
   8992 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8993 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8994 	struct wm_softc *sc = txq->txq_sc;
   8995 	u_int txlimit = sc->sc_tx_intr_process_limit;
   8996 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   8997 	bool txmore;
   8998 	bool rxmore;
   8999 
   9000 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9001 
   9002 	DPRINTF(WM_DEBUG_TX,
   9003 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9004 
   9005 	wm_txrxintr_disable(wmq);
   9006 
   9007 	mutex_enter(txq->txq_lock);
   9008 
   9009 	if (txq->txq_stopping) {
   9010 		mutex_exit(txq->txq_lock);
   9011 		return 0;
   9012 	}
   9013 
   9014 	WM_Q_EVCNT_INCR(txq, txdw);
   9015 	txmore = wm_txeof(txq, txlimit);
   9016 	/* wm_deferred start() is done in wm_handle_queue(). */
   9017 	mutex_exit(txq->txq_lock);
   9018 
   9019 	DPRINTF(WM_DEBUG_RX,
   9020 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9021 	mutex_enter(rxq->rxq_lock);
   9022 
   9023 	if (rxq->rxq_stopping) {
   9024 		mutex_exit(rxq->rxq_lock);
   9025 		return 0;
   9026 	}
   9027 
   9028 	WM_Q_EVCNT_INCR(rxq, rxintr);
   9029 	rxmore = wm_rxeof(rxq, rxlimit);
   9030 	mutex_exit(rxq->rxq_lock);
   9031 
   9032 	wm_itrs_writereg(sc, wmq);
   9033 
   9034 	if (txmore || rxmore)
   9035 		softint_schedule(wmq->wmq_si);
   9036 	else
   9037 		wm_txrxintr_enable(wmq);
   9038 
   9039 	return 1;
   9040 }
   9041 
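/*
 * wm_handle_queue:
 *
 *	Softint handler; process the Tx/Rx work that wm_txrxintr_msix()
 *	deferred when it hit its per-interrupt process limits, then either
 *	reschedule itself or re-enable the queue's interrupts.
 */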
   9042 static void
   9043 wm_handle_queue(void *arg)
   9044 {
   9045 	struct wm_queue *wmq = arg;
   9046 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9047 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9048 	struct wm_softc *sc = txq->txq_sc;
   9049 	u_int txlimit = sc->sc_tx_process_limit;
   9050 	u_int rxlimit = sc->sc_rx_process_limit;
   9051 	bool txmore;
   9052 	bool rxmore;
   9053 
   9054 	mutex_enter(txq->txq_lock);
   9055 	if (txq->txq_stopping) {
   9056 		mutex_exit(txq->txq_lock);
   9057 		return;
   9058 	}
   9059 	txmore = wm_txeof(txq, txlimit);
   9060 	wm_deferred_start_locked(txq);
   9061 	mutex_exit(txq->txq_lock);
   9062 
   9063 	mutex_enter(rxq->rxq_lock);
   9064 	if (rxq->rxq_stopping) {
   9065 		mutex_exit(rxq->rxq_lock);
   9066 		return;
   9067 	}
   9068 	WM_Q_EVCNT_INCR(rxq, rxdefer);
   9069 	rxmore = wm_rxeof(rxq, rxlimit);
   9070 	mutex_exit(rxq->rxq_lock);
   9071 
   9072 	if (txmore || rxmore)
   9073 		softint_schedule(wmq->wmq_si);
   9074 	else
   9075 		wm_txrxintr_enable(wmq);
   9076 }
   9077 
   9078 /*
   9079  * wm_linkintr_msix:
   9080  *
   9081  *	Interrupt service routine for link status change for MSI-X.
   9082  */
   9083 static int
   9084 wm_linkintr_msix(void *arg)
   9085 {
   9086 	struct wm_softc *sc = arg;
   9087 	uint32_t reg;
   9088 	bool has_rxo;
   9089 
   9090 	DPRINTF(WM_DEBUG_LINK,
   9091 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9092 
   9093 	reg = CSR_READ(sc, WMREG_ICR);
   9094 	WM_CORE_LOCK(sc);
   9095 	if (sc->sc_core_stopping)
   9096 		goto out;
   9097 
	if ((reg & ICR_LSC) != 0) {
   9099 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9100 		wm_linkintr(sc, ICR_LSC);
   9101 	}
   9102 
   9103 	/*
   9104 	 * XXX 82574 MSI-X mode workaround
   9105 	 *
	 * In 82574 MSI-X mode, a receive overrun (RXO) raises an interrupt
	 * only on the ICR_OTHER MSI-X vector; it raises neither the
	 * ICR_RXQ(0) nor the ICR_RXQ(1) vector.  So, we generate ICR_RXQ(0)
	 * and ICR_RXQ(1) interrupts by writing WMREG_ICS, to process the
	 * received packets.
   9110 	 */
   9111 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9112 #if defined(WM_DEBUG)
   9113 		log(LOG_WARNING, "%s: Receive overrun\n",
   9114 		    device_xname(sc->sc_dev));
   9115 #endif /* defined(WM_DEBUG) */
   9116 
   9117 		has_rxo = true;
   9118 		/*
   9119 		 * The RXO interrupt is very high rate when receive traffic is
   9120 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   9121 		 * interrupts. ICR_OTHER will be enabled at the end of
   9122 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9123 		 * ICR_RXQ(1) interrupts.
   9124 		 */
   9125 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9126 
   9127 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9128 	}
   9129 
   9132 out:
   9133 	WM_CORE_UNLOCK(sc);
   9134 
   9135 	if (sc->sc_type == WM_T_82574) {
   9136 		if (!has_rxo)
   9137 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9138 		else
   9139 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9140 	} else if (sc->sc_type == WM_T_82575)
   9141 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9142 	else
   9143 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9144 
   9145 	return 1;
   9146 }
   9147 
   9148 /*
   9149  * Media related.
   9150  * GMII, SGMII, TBI (and SERDES)
   9151  */
   9152 
   9153 /* Common */
   9154 
   9155 /*
   9156  * wm_tbi_serdes_set_linkled:
   9157  *
   9158  *	Update the link LED on TBI and SERDES devices.
   9159  */
   9160 static void
   9161 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9162 {
   9163 
   9164 	if (sc->sc_tbi_linkup)
   9165 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9166 	else
   9167 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9168 
   9169 	/* 82540 or newer devices are active low */
   9170 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9171 
   9172 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9173 }
   9174 
   9175 /* GMII related */
   9176 
   9177 /*
   9178  * wm_gmii_reset:
   9179  *
   9180  *	Reset the PHY.
   9181  */
   9182 static void
   9183 wm_gmii_reset(struct wm_softc *sc)
   9184 {
   9185 	uint32_t reg;
   9186 	int rv;
   9187 
   9188 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9189 		device_xname(sc->sc_dev), __func__));
   9190 
   9191 	rv = sc->phy.acquire(sc);
   9192 	if (rv != 0) {
   9193 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9194 		    __func__);
   9195 		return;
   9196 	}
   9197 
   9198 	switch (sc->sc_type) {
   9199 	case WM_T_82542_2_0:
   9200 	case WM_T_82542_2_1:
   9201 		/* null */
   9202 		break;
   9203 	case WM_T_82543:
   9204 		/*
   9205 		 * With 82543, we need to force speed and duplex on the MAC
   9206 		 * equal to what the PHY speed and duplex configuration is.
   9207 		 * In addition, we need to perform a hardware reset on the PHY
   9208 		 * to take it out of reset.
   9209 		 */
   9210 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9211 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9212 
   9213 		/* The PHY reset pin is active-low. */
   9214 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9215 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9216 		    CTRL_EXT_SWDPIN(4));
   9217 		reg |= CTRL_EXT_SWDPIO(4);
   9218 
   9219 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9220 		CSR_WRITE_FLUSH(sc);
   9221 		delay(10*1000);
   9222 
   9223 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9224 		CSR_WRITE_FLUSH(sc);
   9225 		delay(150);
   9226 #if 0
   9227 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9228 #endif
   9229 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9230 		break;
   9231 	case WM_T_82544:	/* reset 10000us */
   9232 	case WM_T_82540:
   9233 	case WM_T_82545:
   9234 	case WM_T_82545_3:
   9235 	case WM_T_82546:
   9236 	case WM_T_82546_3:
   9237 	case WM_T_82541:
   9238 	case WM_T_82541_2:
   9239 	case WM_T_82547:
   9240 	case WM_T_82547_2:
   9241 	case WM_T_82571:	/* reset 100us */
   9242 	case WM_T_82572:
   9243 	case WM_T_82573:
   9244 	case WM_T_82574:
   9245 	case WM_T_82575:
   9246 	case WM_T_82576:
   9247 	case WM_T_82580:
   9248 	case WM_T_I350:
   9249 	case WM_T_I354:
   9250 	case WM_T_I210:
   9251 	case WM_T_I211:
   9252 	case WM_T_82583:
   9253 	case WM_T_80003:
   9254 		/* generic reset */
   9255 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9256 		CSR_WRITE_FLUSH(sc);
   9257 		delay(20000);
   9258 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9259 		CSR_WRITE_FLUSH(sc);
   9260 		delay(20000);
   9261 
   9262 		if ((sc->sc_type == WM_T_82541)
   9263 		    || (sc->sc_type == WM_T_82541_2)
   9264 		    || (sc->sc_type == WM_T_82547)
   9265 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   9267 			/* XXX add code to set LED after phy reset */
   9268 		}
   9269 		break;
   9270 	case WM_T_ICH8:
   9271 	case WM_T_ICH9:
   9272 	case WM_T_ICH10:
   9273 	case WM_T_PCH:
   9274 	case WM_T_PCH2:
   9275 	case WM_T_PCH_LPT:
   9276 	case WM_T_PCH_SPT:
   9277 		/* generic reset */
   9278 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9279 		CSR_WRITE_FLUSH(sc);
   9280 		delay(100);
   9281 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9282 		CSR_WRITE_FLUSH(sc);
   9283 		delay(150);
   9284 		break;
   9285 	default:
   9286 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9287 		    __func__);
   9288 		break;
   9289 	}
   9290 
   9291 	sc->phy.release(sc);
   9292 
   9293 	/* get_cfg_done */
   9294 	wm_get_cfg_done(sc);
   9295 
   9296 	/* extra setup */
   9297 	switch (sc->sc_type) {
   9298 	case WM_T_82542_2_0:
   9299 	case WM_T_82542_2_1:
   9300 	case WM_T_82543:
   9301 	case WM_T_82544:
   9302 	case WM_T_82540:
   9303 	case WM_T_82545:
   9304 	case WM_T_82545_3:
   9305 	case WM_T_82546:
   9306 	case WM_T_82546_3:
   9307 	case WM_T_82541_2:
   9308 	case WM_T_82547_2:
   9309 	case WM_T_82571:
   9310 	case WM_T_82572:
   9311 	case WM_T_82573:
   9312 	case WM_T_82574:
   9313 	case WM_T_82583:
   9314 	case WM_T_82575:
   9315 	case WM_T_82576:
   9316 	case WM_T_82580:
   9317 	case WM_T_I350:
   9318 	case WM_T_I354:
   9319 	case WM_T_I210:
   9320 	case WM_T_I211:
   9321 	case WM_T_80003:
   9322 		/* null */
   9323 		break;
   9324 	case WM_T_82541:
   9325 	case WM_T_82547:
   9326 		/* XXX Configure actively LED after PHY reset */
   9327 		break;
   9328 	case WM_T_ICH8:
   9329 	case WM_T_ICH9:
   9330 	case WM_T_ICH10:
   9331 	case WM_T_PCH:
   9332 	case WM_T_PCH2:
   9333 	case WM_T_PCH_LPT:
   9334 	case WM_T_PCH_SPT:
   9335 		wm_phy_post_reset(sc);
   9336 		break;
   9337 	default:
   9338 		panic("%s: unknown type\n", __func__);
   9339 		break;
   9340 	}
   9341 }
   9342 
   9343 /*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected.  To select the correct read/write functions, the PCI ID or
 * the MAC type is required, without accessing any PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or the MAC type.  The list of PCI IDs may not be
 * perfect, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type.  This might still not be perfect because of missing entries
 * in the comparison table, but it should be better than the first call.
 *
 *  If the newly detected result differs from the previous assumption, a
 * diagnostic message is printed.
   9360  */
   9361 static void
   9362 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9363     uint16_t phy_model)
   9364 {
   9365 	device_t dev = sc->sc_dev;
   9366 	struct mii_data *mii = &sc->sc_mii;
   9367 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9368 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9369 	mii_readreg_t new_readreg;
   9370 	mii_writereg_t new_writereg;
   9371 
   9372 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9373 		device_xname(sc->sc_dev), __func__));
   9374 
   9375 	if (mii->mii_readreg == NULL) {
   9376 		/*
   9377 		 *  This is the first call of this function. For ICH and PCH
   9378 		 * variants, it's difficult to determine the PHY access method
   9379 		 * by sc_type, so use the PCI product ID for some devices.
   9380 		 */
   9381 
   9382 		switch (sc->sc_pcidevid) {
   9383 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9384 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9385 			/* 82577 */
   9386 			new_phytype = WMPHY_82577;
   9387 			break;
   9388 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9389 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9390 			/* 82578 */
   9391 			new_phytype = WMPHY_82578;
   9392 			break;
   9393 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9394 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9395 			/* 82579 */
   9396 			new_phytype = WMPHY_82579;
   9397 			break;
   9398 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9399 		case PCI_PRODUCT_INTEL_82801I_BM:
   9400 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9401 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9402 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9403 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9404 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9405 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9406 			/* ICH8, 9, 10 with 82567 */
   9407 			new_phytype = WMPHY_BM;
   9408 			break;
   9409 		default:
   9410 			break;
   9411 		}
   9412 	} else {
   9413 		/* It's not the first call. Use PHY OUI and model */
   9414 		switch (phy_oui) {
   9415 		case MII_OUI_ATHEROS: /* XXX ??? */
   9416 			switch (phy_model) {
   9417 			case 0x0004: /* XXX */
   9418 				new_phytype = WMPHY_82578;
   9419 				break;
   9420 			default:
   9421 				break;
   9422 			}
   9423 			break;
   9424 		case MII_OUI_xxMARVELL:
   9425 			switch (phy_model) {
   9426 			case MII_MODEL_xxMARVELL_I210:
   9427 				new_phytype = WMPHY_I210;
   9428 				break;
   9429 			case MII_MODEL_xxMARVELL_E1011:
   9430 			case MII_MODEL_xxMARVELL_E1000_3:
   9431 			case MII_MODEL_xxMARVELL_E1000_5:
   9432 			case MII_MODEL_xxMARVELL_E1112:
   9433 				new_phytype = WMPHY_M88;
   9434 				break;
   9435 			case MII_MODEL_xxMARVELL_E1149:
   9436 				new_phytype = WMPHY_BM;
   9437 				break;
   9438 			case MII_MODEL_xxMARVELL_E1111:
   9439 			case MII_MODEL_xxMARVELL_I347:
   9440 			case MII_MODEL_xxMARVELL_E1512:
   9441 			case MII_MODEL_xxMARVELL_E1340M:
   9442 			case MII_MODEL_xxMARVELL_E1543:
   9443 				new_phytype = WMPHY_M88;
   9444 				break;
   9445 			case MII_MODEL_xxMARVELL_I82563:
   9446 				new_phytype = WMPHY_GG82563;
   9447 				break;
   9448 			default:
   9449 				break;
   9450 			}
   9451 			break;
   9452 		case MII_OUI_INTEL:
   9453 			switch (phy_model) {
   9454 			case MII_MODEL_INTEL_I82577:
   9455 				new_phytype = WMPHY_82577;
   9456 				break;
   9457 			case MII_MODEL_INTEL_I82579:
   9458 				new_phytype = WMPHY_82579;
   9459 				break;
   9460 			case MII_MODEL_INTEL_I217:
   9461 				new_phytype = WMPHY_I217;
   9462 				break;
   9463 			case MII_MODEL_INTEL_I82580:
   9464 			case MII_MODEL_INTEL_I350:
   9465 				new_phytype = WMPHY_82580;
   9466 				break;
   9467 			default:
   9468 				break;
   9469 			}
   9470 			break;
   9471 		case MII_OUI_yyINTEL:
   9472 			switch (phy_model) {
   9473 			case MII_MODEL_yyINTEL_I82562G:
   9474 			case MII_MODEL_yyINTEL_I82562EM:
   9475 			case MII_MODEL_yyINTEL_I82562ET:
   9476 				new_phytype = WMPHY_IFE;
   9477 				break;
   9478 			case MII_MODEL_yyINTEL_IGP01E1000:
   9479 				new_phytype = WMPHY_IGP;
   9480 				break;
   9481 			case MII_MODEL_yyINTEL_I82566:
   9482 				new_phytype = WMPHY_IGP_3;
   9483 				break;
   9484 			default:
   9485 				break;
   9486 			}
   9487 			break;
   9488 		default:
   9489 			break;
   9490 		}
   9491 		if (new_phytype == WMPHY_UNKNOWN)
   9492 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9493 			    __func__);
   9494 
   9495 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. PHY type from PHY ID = %u\n",
   9499 			    sc->sc_phytype, new_phytype);
   9500 		}
   9501 	}
   9502 
   9503 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9504 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9505 		/* SGMII */
   9506 		new_readreg = wm_sgmii_readreg;
   9507 		new_writereg = wm_sgmii_writereg;
   9508 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9509 		/* BM2 (phyaddr == 1) */
   9510 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9511 		    && (new_phytype != WMPHY_BM)
   9512 		    && (new_phytype != WMPHY_UNKNOWN))
   9513 			doubt_phytype = new_phytype;
   9514 		new_phytype = WMPHY_BM;
   9515 		new_readreg = wm_gmii_bm_readreg;
   9516 		new_writereg = wm_gmii_bm_writereg;
   9517 	} else if (sc->sc_type >= WM_T_PCH) {
   9518 		/* All PCH* use _hv_ */
   9519 		new_readreg = wm_gmii_hv_readreg;
   9520 		new_writereg = wm_gmii_hv_writereg;
   9521 	} else if (sc->sc_type >= WM_T_ICH8) {
   9522 		/* non-82567 ICH8, 9 and 10 */
   9523 		new_readreg = wm_gmii_i82544_readreg;
   9524 		new_writereg = wm_gmii_i82544_writereg;
   9525 	} else if (sc->sc_type >= WM_T_80003) {
   9526 		/* 80003 */
   9527 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9528 		    && (new_phytype != WMPHY_GG82563)
   9529 		    && (new_phytype != WMPHY_UNKNOWN))
   9530 			doubt_phytype = new_phytype;
   9531 		new_phytype = WMPHY_GG82563;
   9532 		new_readreg = wm_gmii_i80003_readreg;
   9533 		new_writereg = wm_gmii_i80003_writereg;
   9534 	} else if (sc->sc_type >= WM_T_I210) {
   9535 		/* I210 and I211 */
   9536 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9537 		    && (new_phytype != WMPHY_I210)
   9538 		    && (new_phytype != WMPHY_UNKNOWN))
   9539 			doubt_phytype = new_phytype;
   9540 		new_phytype = WMPHY_I210;
   9541 		new_readreg = wm_gmii_gs40g_readreg;
   9542 		new_writereg = wm_gmii_gs40g_writereg;
   9543 	} else if (sc->sc_type >= WM_T_82580) {
   9544 		/* 82580, I350 and I354 */
   9545 		new_readreg = wm_gmii_82580_readreg;
   9546 		new_writereg = wm_gmii_82580_writereg;
   9547 	} else if (sc->sc_type >= WM_T_82544) {
   9548 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   9549 		new_readreg = wm_gmii_i82544_readreg;
   9550 		new_writereg = wm_gmii_i82544_writereg;
   9551 	} else {
   9552 		new_readreg = wm_gmii_i82543_readreg;
   9553 		new_writereg = wm_gmii_i82543_writereg;
   9554 	}
   9555 
   9556 	if (new_phytype == WMPHY_BM) {
   9557 		/* All BM use _bm_ */
   9558 		new_readreg = wm_gmii_bm_readreg;
   9559 		new_writereg = wm_gmii_bm_writereg;
   9560 	}
   9561 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9562 		/* All PCH* use _hv_ */
   9563 		new_readreg = wm_gmii_hv_readreg;
   9564 		new_writereg = wm_gmii_hv_writereg;
   9565 	}
   9566 
   9567 	/* Diag output */
   9568 	if (doubt_phytype != WMPHY_UNKNOWN)
   9569 		aprint_error_dev(dev, "Assumed new PHY type was "
   9570 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9571 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
		    " was incorrect. New PHY type = %u\n",
		    sc->sc_phytype, new_phytype);
   9577 
   9578 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9579 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9580 
   9581 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9582 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9583 		    "function was incorrect.\n");
   9584 
   9585 	/* Update now */
   9586 	sc->sc_phytype = new_phytype;
   9587 	mii->mii_readreg = new_readreg;
   9588 	mii->mii_writereg = new_writereg;
   9589 }
   9590 
   9591 /*
   9592  * wm_get_phy_id_82575:
   9593  *
   9594  * Return PHY ID. Return -1 if it failed.
   9595  */
   9596 static int
   9597 wm_get_phy_id_82575(struct wm_softc *sc)
   9598 {
   9599 	uint32_t reg;
   9600 	int phyid = -1;
   9601 
   9602 	/* XXX */
   9603 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9604 		return -1;
   9605 
   9606 	if (wm_sgmii_uses_mdio(sc)) {
   9607 		switch (sc->sc_type) {
   9608 		case WM_T_82575:
   9609 		case WM_T_82576:
   9610 			reg = CSR_READ(sc, WMREG_MDIC);
   9611 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9612 			break;
   9613 		case WM_T_82580:
   9614 		case WM_T_I350:
   9615 		case WM_T_I354:
   9616 		case WM_T_I210:
   9617 		case WM_T_I211:
   9618 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9619 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9620 			break;
   9621 		default:
   9622 			return -1;
   9623 		}
   9624 	}
   9625 
   9626 	return phyid;
   9627 }
   9628 
   9630 /*
   9631  * wm_gmii_mediainit:
   9632  *
   9633  *	Initialize media for use on 1000BASE-T devices.
   9634  */
   9635 static void
   9636 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9637 {
   9638 	device_t dev = sc->sc_dev;
   9639 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9640 	struct mii_data *mii = &sc->sc_mii;
   9641 	uint32_t reg;
   9642 
   9643 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9644 		device_xname(sc->sc_dev), __func__));
   9645 
   9646 	/* We have GMII. */
   9647 	sc->sc_flags |= WM_F_HAS_MII;
   9648 
   9649 	if (sc->sc_type == WM_T_80003)
   9650 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9651 	else
   9652 		sc->sc_tipg = TIPG_1000T_DFLT;
   9653 
   9654 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9655 	if ((sc->sc_type == WM_T_82580)
   9656 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9657 	    || (sc->sc_type == WM_T_I211)) {
   9658 		reg = CSR_READ(sc, WMREG_PHPM);
   9659 		reg &= ~PHPM_GO_LINK_D;
   9660 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9661 	}
   9662 
   9663 	/*
   9664 	 * Let the chip set speed/duplex on its own based on
   9665 	 * signals from the PHY.
   9666 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9667 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9668 	 */
   9669 	sc->sc_ctrl |= CTRL_SLU;
   9670 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9671 
   9672 	/* Initialize our media structures and probe the GMII. */
   9673 	mii->mii_ifp = ifp;
   9674 
   9675 	mii->mii_statchg = wm_gmii_statchg;
   9676 
   9677 	/* get PHY control from SMBus to PCIe */
   9678 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9679 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9680 		wm_smbustopci(sc);
   9681 
   9682 	wm_gmii_reset(sc);
   9683 
   9684 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9685 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9686 	    wm_gmii_mediastatus);
   9687 
   9688 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9689 	    || (sc->sc_type == WM_T_82580)
   9690 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9691 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9692 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9693 			/* Attach only one port */
   9694 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9695 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9696 		} else {
   9697 			int i, id;
   9698 			uint32_t ctrl_ext;
   9699 
   9700 			id = wm_get_phy_id_82575(sc);
   9701 			if (id != -1) {
   9702 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9703 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9704 			}
   9705 			if ((id == -1)
   9706 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9707 				/* Power on sgmii phy if it is disabled */
   9708 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9709 				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9711 				CSR_WRITE_FLUSH(sc);
   9712 				delay(300*1000); /* XXX too long */
   9713 
				/* Try PHY addresses 1 through 7 */
   9715 				for (i = 1; i < 8; i++)
   9716 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9717 					    0xffffffff, i, MII_OFFSET_ANY,
   9718 					    MIIF_DOPAUSE);
   9719 
				/* Restore previous SFP cage power state */
   9721 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9722 			}
   9723 		}
   9724 	} else {
   9725 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9726 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9727 	}
   9728 
   9729 	/*
   9730 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9731 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9732 	 */
   9733 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9734 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9735 		wm_set_mdio_slow_mode_hv(sc);
   9736 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9737 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9738 	}
   9739 
   9740 	/*
   9741 	 * (For ICH8 variants)
   9742 	 * If PHY detection failed, use BM's r/w function and retry.
   9743 	 */
   9744 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9745 		/* if failed, retry with *_bm_* */
   9746 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9747 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9748 		    sc->sc_phytype);
   9749 		sc->sc_phytype = WMPHY_BM;
   9750 		mii->mii_readreg = wm_gmii_bm_readreg;
   9751 		mii->mii_writereg = wm_gmii_bm_writereg;
   9752 
   9753 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9754 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9755 	}
   9756 
   9757 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   9759 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9760 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9761 		sc->sc_phytype = WMPHY_NONE;
   9762 	} else {
   9763 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9764 
   9765 		/*
   9766 		 * PHY Found! Check PHY type again by the second call of
   9767 		 * wm_gmii_setup_phytype.
   9768 		 */
   9769 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9770 		    child->mii_mpd_model);
   9771 
   9772 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9773 	}
   9774 }
   9775 
   9776 /*
   9777  * wm_gmii_mediachange:	[ifmedia interface function]
   9778  *
   9779  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9780  */
   9781 static int
   9782 wm_gmii_mediachange(struct ifnet *ifp)
   9783 {
   9784 	struct wm_softc *sc = ifp->if_softc;
   9785 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9786 	int rc;
   9787 
   9788 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9789 		device_xname(sc->sc_dev), __func__));
   9790 	if ((ifp->if_flags & IFF_UP) == 0)
   9791 		return 0;
   9792 
   9793 	/* Disable D0 LPLU. */
   9794 	wm_lplu_d0_disable(sc);
   9795 
   9796 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9797 	sc->sc_ctrl |= CTRL_SLU;
   9798 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9799 	    || (sc->sc_type > WM_T_82543)) {
   9800 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9801 	} else {
   9802 		sc->sc_ctrl &= ~CTRL_ASDE;
   9803 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9804 		if (ife->ifm_media & IFM_FDX)
   9805 			sc->sc_ctrl |= CTRL_FD;
   9806 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9807 		case IFM_10_T:
   9808 			sc->sc_ctrl |= CTRL_SPEED_10;
   9809 			break;
   9810 		case IFM_100_TX:
   9811 			sc->sc_ctrl |= CTRL_SPEED_100;
   9812 			break;
   9813 		case IFM_1000_T:
   9814 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9815 			break;
   9816 		default:
   9817 			panic("wm_gmii_mediachange: bad media 0x%x",
   9818 			    ife->ifm_media);
   9819 		}
   9820 	}
   9821 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9822 	CSR_WRITE_FLUSH(sc);
   9823 	if (sc->sc_type <= WM_T_82543)
   9824 		wm_gmii_reset(sc);
   9825 
   9826 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9827 		return 0;
   9828 	return rc;
   9829 }
   9830 
   9831 /*
   9832  * wm_gmii_mediastatus:	[ifmedia interface function]
   9833  *
   9834  *	Get the current interface media status on a 1000BASE-T device.
   9835  */
   9836 static void
   9837 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9838 {
   9839 	struct wm_softc *sc = ifp->if_softc;
   9840 
   9841 	ether_mediastatus(ifp, ifmr);
   9842 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9843 	    | sc->sc_flowflags;
   9844 }
   9845 
   9846 #define	MDI_IO		CTRL_SWDPIN(2)
   9847 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9848 #define	MDI_CLK		CTRL_SWDPIN(3)
   9849 
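/*
 * On the i82543 the MII management interface is bit-banged through
 * software-controllable pins: MDI_IO carries the data, MDI_CLK supplies
 * the clock and MDI_DIR selects the data pin direction.
 */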
   9850 static void
   9851 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9852 {
   9853 	uint32_t i, v;
   9854 
   9855 	v = CSR_READ(sc, WMREG_CTRL);
   9856 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9857 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9858 
   9859 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9860 		if (data & i)
   9861 			v |= MDI_IO;
   9862 		else
   9863 			v &= ~MDI_IO;
   9864 		CSR_WRITE(sc, WMREG_CTRL, v);
   9865 		CSR_WRITE_FLUSH(sc);
   9866 		delay(10);
   9867 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9868 		CSR_WRITE_FLUSH(sc);
   9869 		delay(10);
   9870 		CSR_WRITE(sc, WMREG_CTRL, v);
   9871 		CSR_WRITE_FLUSH(sc);
   9872 		delay(10);
   9873 	}
   9874 }
   9875 
   9876 static uint32_t
   9877 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9878 {
   9879 	uint32_t v, i, data = 0;
   9880 
   9881 	v = CSR_READ(sc, WMREG_CTRL);
   9882 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9883 	v |= CTRL_SWDPIO(3);
   9884 
   9885 	CSR_WRITE(sc, WMREG_CTRL, v);
   9886 	CSR_WRITE_FLUSH(sc);
   9887 	delay(10);
   9888 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9889 	CSR_WRITE_FLUSH(sc);
   9890 	delay(10);
   9891 	CSR_WRITE(sc, WMREG_CTRL, v);
   9892 	CSR_WRITE_FLUSH(sc);
   9893 	delay(10);
   9894 
   9895 	for (i = 0; i < 16; i++) {
   9896 		data <<= 1;
   9897 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9898 		CSR_WRITE_FLUSH(sc);
   9899 		delay(10);
   9900 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9901 			data |= 1;
   9902 		CSR_WRITE(sc, WMREG_CTRL, v);
   9903 		CSR_WRITE_FLUSH(sc);
   9904 		delay(10);
   9905 	}
   9906 
   9907 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9908 	CSR_WRITE_FLUSH(sc);
   9909 	delay(10);
   9910 	CSR_WRITE(sc, WMREG_CTRL, v);
   9911 	CSR_WRITE_FLUSH(sc);
   9912 	delay(10);
   9913 
   9914 	return data;
   9915 }
   9916 
   9917 #undef MDI_IO
   9918 #undef MDI_DIR
   9919 #undef MDI_CLK
   9920 
   9921 /*
   9922  * wm_gmii_i82543_readreg:	[mii interface function]
   9923  *
   9924  *	Read a PHY register on the GMII (i82543 version).
   9925  */
   9926 static int
   9927 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9928 {
   9929 	struct wm_softc *sc = device_private(dev);
   9930 	int rv;
   9931 
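	/*
	 * Send the standard MII management preamble (32 ones) followed by
	 * the 14 header bits: start (2), read opcode (2), PHY address (5)
	 * and register address (5), most significant bit first.
	 */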
   9932 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9933 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9934 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9935 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9936 
   9937 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9938 	    device_xname(dev), phy, reg, rv));
   9939 
   9940 	return rv;
   9941 }
   9942 
   9943 /*
   9944  * wm_gmii_i82543_writereg:	[mii interface function]
   9945  *
   9946  *	Write a PHY register on the GMII (i82543 version).
   9947  */
   9948 static void
   9949 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9950 {
   9951 	struct wm_softc *sc = device_private(dev);
   9952 
   9953 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9954 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9955 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9956 	    (MII_COMMAND_START << 30), 32);
   9957 }
   9958 
   9959 /*
   9960  * wm_gmii_mdic_readreg:	[mii interface function]
   9961  *
   9962  *	Read a PHY register on the GMII.
   9963  */
   9964 static int
   9965 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9966 {
   9967 	struct wm_softc *sc = device_private(dev);
   9968 	uint32_t mdic = 0;
   9969 	int i, rv;
   9970 
   9971 	if (reg > MII_ADDRMASK) {
   9972 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9973 		    __func__, sc->sc_phytype, reg);
   9974 		reg &= MII_ADDRMASK;
   9975 	}
   9976 
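	/* Issue the read command, then poll the ready bit in 50us steps. */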
   9977 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9978 	    MDIC_REGADD(reg));
   9979 
   9980 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9981 		mdic = CSR_READ(sc, WMREG_MDIC);
   9982 		if (mdic & MDIC_READY)
   9983 			break;
   9984 		delay(50);
   9985 	}
   9986 
   9987 	if ((mdic & MDIC_READY) == 0) {
   9988 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9989 		    device_xname(dev), phy, reg);
   9990 		rv = 0;
   9991 	} else if (mdic & MDIC_E) {
   9992 #if 0 /* This is normal if no PHY is present. */
   9993 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9994 		    device_xname(dev), phy, reg);
   9995 #endif
   9996 		rv = 0;
   9997 	} else {
   9998 		rv = MDIC_DATA(mdic);
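		/* All-ones data usually means that no PHY answered. */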
   9999 		if (rv == 0xffff)
   10000 			rv = 0;
   10001 	}
   10002 
   10003 	return rv;
   10004 }
   10005 
   10006 /*
   10007  * wm_gmii_mdic_writereg:	[mii interface function]
   10008  *
   10009  *	Write a PHY register on the GMII.
   10010  */
   10011 static void
   10012 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10013 {
   10014 	struct wm_softc *sc = device_private(dev);
   10015 	uint32_t mdic = 0;
   10016 	int i;
   10017 
   10018 	if (reg > MII_ADDRMASK) {
   10019 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10020 		    __func__, sc->sc_phytype, reg);
   10021 		reg &= MII_ADDRMASK;
   10022 	}
   10023 
   10024 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10025 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10026 
   10027 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10028 		mdic = CSR_READ(sc, WMREG_MDIC);
   10029 		if (mdic & MDIC_READY)
   10030 			break;
   10031 		delay(50);
   10032 	}
   10033 
   10034 	if ((mdic & MDIC_READY) == 0)
   10035 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10036 		    device_xname(dev), phy, reg);
   10037 	else if (mdic & MDIC_E)
   10038 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10039 		    device_xname(dev), phy, reg);
   10040 }
   10041 
   10042 /*
   10043  * wm_gmii_i82544_readreg:	[mii interface function]
   10044  *
   10045  *	Read a PHY register on the GMII.
   10046  */
   10047 static int
   10048 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10049 {
   10050 	struct wm_softc *sc = device_private(dev);
   10051 	int rv;
   10052 
   10053 	if (sc->phy.acquire(sc)) {
   10054 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10055 		return 0;
   10056 	}
   10057 
   10058 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10059 		switch (sc->sc_phytype) {
   10060 		case WMPHY_IGP:
   10061 		case WMPHY_IGP_2:
   10062 		case WMPHY_IGP_3:
			wm_gmii_mdic_writereg(dev, phy,
			    MII_IGPHY_PAGE_SELECT, reg);
   10064 			break;
   10065 		default:
   10066 #ifdef WM_DEBUG
   10067 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10068 			    __func__, sc->sc_phytype, reg);
   10069 #endif
   10070 			break;
   10071 		}
   10072 	}
   10073 
   10074 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10075 	sc->phy.release(sc);
   10076 
   10077 	return rv;
   10078 }
   10079 
   10080 /*
   10081  * wm_gmii_i82544_writereg:	[mii interface function]
   10082  *
   10083  *	Write a PHY register on the GMII.
   10084  */
   10085 static void
   10086 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10087 {
   10088 	struct wm_softc *sc = device_private(dev);
   10089 
   10090 	if (sc->phy.acquire(sc)) {
   10091 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10092 		return;
   10093 	}
   10094 
   10095 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10096 		switch (sc->sc_phytype) {
   10097 		case WMPHY_IGP:
   10098 		case WMPHY_IGP_2:
   10099 		case WMPHY_IGP_3:
			wm_gmii_mdic_writereg(dev, phy,
			    MII_IGPHY_PAGE_SELECT, reg);
   10101 			break;
   10102 		default:
   10103 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10105 			    __func__, sc->sc_phytype, reg);
   10106 #endif
   10107 			break;
   10108 		}
   10109 	}
   10110 
   10111 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10112 	sc->phy.release(sc);
   10113 }
   10114 
   10115 /*
   10116  * wm_gmii_i80003_readreg:	[mii interface function]
   10117  *
 *	Read a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10121  */
   10122 static int
   10123 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10124 {
   10125 	struct wm_softc *sc = device_private(dev);
   10126 	int page_select, temp;
   10127 	int rv;
   10128 
   10129 	if (phy != 1) /* only one PHY on kumeran bus */
   10130 		return 0;
   10131 
   10132 	if (sc->phy.acquire(sc)) {
   10133 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10134 		return 0;
   10135 	}
   10136 
   10137 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10138 		page_select = GG82563_PHY_PAGE_SELECT;
   10139 	else {
   10140 		/*
   10141 		 * Use Alternative Page Select register to access registers
   10142 		 * 30 and 31.
   10143 		 */
   10144 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10145 	}
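	/* The page number is carried in the upper bits of 'reg'. */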
   10146 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10147 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10148 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10149 		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
   10152 		 */
   10153 		delay(200);
   10154 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10155 			device_printf(dev, "%s failed\n", __func__);
   10156 			rv = 0; /* XXX */
   10157 			goto out;
   10158 		}
   10159 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10160 		delay(200);
   10161 	} else
   10162 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10163 
   10164 out:
   10165 	sc->phy.release(sc);
   10166 	return rv;
   10167 }
   10168 
   10169 /*
   10170  * wm_gmii_i80003_writereg:	[mii interface function]
   10171  *
 *	Write a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10175  */
   10176 static void
   10177 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10178 {
   10179 	struct wm_softc *sc = device_private(dev);
   10180 	int page_select, temp;
   10181 
   10182 	if (phy != 1) /* only one PHY on kumeran bus */
   10183 		return;
   10184 
   10185 	if (sc->phy.acquire(sc)) {
   10186 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10187 		return;
   10188 	}
   10189 
   10190 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10191 		page_select = GG82563_PHY_PAGE_SELECT;
   10192 	else {
   10193 		/*
   10194 		 * Use Alternative Page Select register to access registers
   10195 		 * 30 and 31.
   10196 		 */
   10197 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10198 	}
   10199 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10200 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10201 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10202 		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
   10205 		 */
   10206 		delay(200);
   10207 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10208 			device_printf(dev, "%s failed\n", __func__);
   10209 			goto out;
   10210 		}
   10211 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10212 		delay(200);
   10213 	} else
   10214 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10215 
   10216 out:
   10217 	sc->phy.release(sc);
   10218 }
   10219 
   10220 /*
   10221  * wm_gmii_bm_readreg:	[mii interface function]
   10222  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10226  */
   10227 static int
   10228 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10229 {
   10230 	struct wm_softc *sc = device_private(dev);
   10231 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10232 	uint16_t val;
   10233 	int rv;
   10234 
   10235 	if (sc->phy.acquire(sc)) {
   10236 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10237 		return 0;
   10238 	}
   10239 
   10240 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
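	/*
	 * On BM PHYs (other than 82574/82583), registers on pages >= 768
	 * and a few special registers are only reachable at PHY address 1.
	 */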
   10241 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10242 		    || (reg == 31)) ? 1 : phy;
   10243 	/* Page 800 works differently than the rest so it has its own func */
   10244 	if (page == BM_WUC_PAGE) {
   10245 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10246 		rv = val;
   10247 		goto release;
   10248 	}
   10249 
   10250 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10251 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10252 		    && (sc->sc_type != WM_T_82583))
   10253 			wm_gmii_mdic_writereg(dev, phy,
   10254 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10255 		else
   10256 			wm_gmii_mdic_writereg(dev, phy,
   10257 			    BME1000_PHY_PAGE_SELECT, page);
   10258 	}
   10259 
   10260 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10261 
   10262 release:
   10263 	sc->phy.release(sc);
   10264 	return rv;
   10265 }
   10266 
   10267 /*
   10268  * wm_gmii_bm_writereg:	[mii interface function]
   10269  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10273  */
   10274 static void
   10275 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10276 {
   10277 	struct wm_softc *sc = device_private(dev);
   10278 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10279 
   10280 	if (sc->phy.acquire(sc)) {
   10281 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10282 		return;
   10283 	}
   10284 
   10285 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10286 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10287 		    || (reg == 31)) ? 1 : phy;
   10288 	/* Page 800 works differently than the rest so it has its own func */
   10289 	if (page == BM_WUC_PAGE) {
   10290 		uint16_t tmp;
   10291 
   10292 		tmp = val;
   10293 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10294 		goto release;
   10295 	}
   10296 
   10297 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10298 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10299 		    && (sc->sc_type != WM_T_82583))
   10300 			wm_gmii_mdic_writereg(dev, phy,
   10301 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10302 		else
   10303 			wm_gmii_mdic_writereg(dev, phy,
   10304 			    BME1000_PHY_PAGE_SELECT, page);
   10305 	}
   10306 
   10307 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10308 
   10309 release:
   10310 	sc->phy.release(sc);
   10311 }
   10312 
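/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800). Access is first
 *	enabled through the wakeup-control page (769) and disabled again
 *	afterwards; see the numbered steps below.
 */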
   10313 static void
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10315 {
   10316 	struct wm_softc *sc = device_private(dev);
   10317 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10318 	uint16_t wuce, reg;
   10319 
   10320 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10321 		device_xname(dev), __func__));
   10322 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10323 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   10325 	}
   10326 
   10327 	/*
   10328 	 * 1) Enable PHY wakeup register first.
   10329 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10330 	 */
   10331 
   10332 	/* Set page 769 */
   10333 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10334 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10335 
   10336 	/* Read WUCE and save it */
   10337 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10338 
   10339 	reg = wuce | BM_WUC_ENABLE_BIT;
   10340 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10341 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10342 
   10343 	/* Select page 800 */
   10344 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10345 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10346 
   10347 	/*
   10348 	 * 2) Access PHY wakeup register.
   10349 	 * See e1000_access_phy_wakeup_reg_bm.
   10350 	 */
   10351 
   10352 	/* Write page 800 */
   10353 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10354 
   10355 	if (rd)
   10356 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10357 	else
   10358 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10359 
   10360 	/*
   10361 	 * 3) Disable PHY wakeup register.
   10362 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10363 	 */
   10364 	/* Set page 769 */
   10365 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10366 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10367 
   10368 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10369 }
   10370 
   10371 /*
   10372  * wm_gmii_hv_readreg:	[mii interface function]
   10373  *
 *	Read a PHY register on the HV (PCH-family) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10377  */
   10378 static int
   10379 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10380 {
   10381 	struct wm_softc *sc = device_private(dev);
   10382 	int rv;
   10383 
   10384 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10385 		device_xname(dev), __func__));
   10386 	if (sc->phy.acquire(sc)) {
   10387 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10388 		return 0;
   10389 	}
   10390 
   10391 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10392 	sc->phy.release(sc);
   10393 	return rv;
   10394 }
   10395 
   10396 static int
   10397 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10398 {
   10399 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10400 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10401 	uint16_t val;
   10402 	int rv;
   10403 
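	/*
	 * Registers on pages at or above HV_INTC_FC_PAGE_START (768) are
	 * only reachable at PHY address 1.
	 */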
   10404 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10405 
   10406 	/* Page 800 works differently than the rest so it has its own func */
   10407 	if (page == BM_WUC_PAGE) {
   10408 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10409 		return val;
   10410 	}
   10411 
   10412 	/*
	 * Pages between 1 and 767 work differently than the rest and are
	 * not handled here.
   10415 	 */
   10416 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10417 		printf("gmii_hv_readreg!!!\n");
   10418 		return 0;
   10419 	}
   10420 
   10421 	/*
   10422 	 * XXX I21[789] documents say that the SMBus Address register is at
   10423 	 * PHY address 01, Page 0 (not 768), Register 26.
   10424 	 */
   10425 	if (page == HV_INTC_FC_PAGE_START)
   10426 		page = 0;
   10427 
   10428 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10429 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10430 		    page << BME1000_PAGE_SHIFT);
   10431 	}
   10432 
   10433 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10434 	return rv;
   10435 }
   10436 
   10437 /*
   10438  * wm_gmii_hv_writereg:	[mii interface function]
   10439  *
 *	Write a PHY register on the HV (PCH-family) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10443  */
   10444 static void
   10445 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10446 {
   10447 	struct wm_softc *sc = device_private(dev);
   10448 
   10449 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10450 		device_xname(dev), __func__));
   10451 
   10452 	if (sc->phy.acquire(sc)) {
   10453 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10454 		return;
   10455 	}
   10456 
   10457 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10458 	sc->phy.release(sc);
   10459 }
   10460 
   10461 static void
   10462 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10463 {
   10464 	struct wm_softc *sc = device_private(dev);
   10465 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10466 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10467 
   10468 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10469 
   10470 	/* Page 800 works differently than the rest so it has its own func */
   10471 	if (page == BM_WUC_PAGE) {
   10472 		uint16_t tmp;
   10473 
   10474 		tmp = val;
   10475 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10476 		return;
   10477 	}
   10478 
   10479 	/*
	 * Pages between 1 and 767 work differently than the rest and are
	 * not handled here.
   10482 	 */
   10483 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10484 		printf("gmii_hv_writereg!!!\n");
   10485 		return;
   10486 	}
   10487 
   10488 	{
   10489 		/*
   10490 		 * XXX I21[789] documents say that the SMBus Address register
   10491 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10492 		 */
   10493 		if (page == HV_INTC_FC_PAGE_START)
   10494 			page = 0;
   10495 
   10496 		/*
   10497 		 * XXX Workaround MDIO accesses being disabled after entering
   10498 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10499 		 * register is set)
   10500 		 */
   10501 		if (sc->sc_phytype == WMPHY_82578) {
   10502 			struct mii_softc *child;
   10503 
   10504 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10505 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10506 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10507 			    && ((val & (1 << 11)) != 0)) {
   10508 				printf("XXX need workaround\n");
   10509 			}
   10510 		}
   10511 
   10512 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10513 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10514 			    page << BME1000_PAGE_SHIFT);
   10515 		}
   10516 	}
   10517 
   10518 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10519 }
   10520 
   10521 /*
   10522  * wm_gmii_82580_readreg:	[mii interface function]
   10523  *
   10524  *	Read a PHY register on the 82580 and I350.
   10525  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10527  */
   10528 static int
   10529 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10530 {
   10531 	struct wm_softc *sc = device_private(dev);
   10532 	int rv;
   10533 
   10534 	if (sc->phy.acquire(sc) != 0) {
   10535 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10536 		return 0;
   10537 	}
   10538 
   10539 #ifdef DIAGNOSTIC
   10540 	if (reg > MII_ADDRMASK) {
   10541 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10542 		    __func__, sc->sc_phytype, reg);
   10543 		reg &= MII_ADDRMASK;
   10544 	}
   10545 #endif
   10546 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10547 
   10548 	sc->phy.release(sc);
   10549 	return rv;
   10550 }
   10551 
   10552 /*
   10553  * wm_gmii_82580_writereg:	[mii interface function]
   10554  *
   10555  *	Write a PHY register on the 82580 and I350.
   10556  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10558  */
   10559 static void
   10560 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10561 {
   10562 	struct wm_softc *sc = device_private(dev);
   10563 
   10564 	if (sc->phy.acquire(sc) != 0) {
   10565 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10566 		return;
   10567 	}
   10568 
   10569 #ifdef DIAGNOSTIC
   10570 	if (reg > MII_ADDRMASK) {
   10571 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10572 		    __func__, sc->sc_phytype, reg);
   10573 		reg &= MII_ADDRMASK;
   10574 	}
   10575 #endif
   10576 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10577 
   10578 	sc->phy.release(sc);
   10579 }
   10580 
   10581 /*
   10582  * wm_gmii_gs40g_readreg:	[mii interface function]
   10583  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10587  */
   10588 static int
   10589 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10590 {
   10591 	struct wm_softc *sc = device_private(dev);
   10592 	int page, offset;
   10593 	int rv;
   10594 
   10595 	/* Acquire semaphore */
   10596 	if (sc->phy.acquire(sc)) {
   10597 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10598 		return 0;
   10599 	}
   10600 
   10601 	/* Page select */
   10602 	page = reg >> GS40G_PAGE_SHIFT;
   10603 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10604 
   10605 	/* Read reg */
   10606 	offset = reg & GS40G_OFFSET_MASK;
   10607 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10608 
   10609 	sc->phy.release(sc);
   10610 	return rv;
   10611 }
   10612 
   10613 /*
   10614  * wm_gmii_gs40g_writereg:	[mii interface function]
   10615  *
   10616  *	Write a PHY register on the I210 and I211.
   10617  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10619  */
   10620 static void
   10621 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10622 {
   10623 	struct wm_softc *sc = device_private(dev);
   10624 	int page, offset;
   10625 
   10626 	/* Acquire semaphore */
   10627 	if (sc->phy.acquire(sc)) {
   10628 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10629 		return;
   10630 	}
   10631 
   10632 	/* Page select */
   10633 	page = reg >> GS40G_PAGE_SHIFT;
   10634 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10635 
   10636 	/* Write reg */
   10637 	offset = reg & GS40G_OFFSET_MASK;
   10638 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10639 
   10640 	/* Release semaphore */
   10641 	sc->phy.release(sc);
   10642 }
   10643 
   10644 /*
   10645  * wm_gmii_statchg:	[mii interface function]
   10646  *
   10647  *	Callback from MII layer when media changes.
   10648  */
   10649 static void
   10650 wm_gmii_statchg(struct ifnet *ifp)
   10651 {
   10652 	struct wm_softc *sc = ifp->if_softc;
   10653 	struct mii_data *mii = &sc->sc_mii;
   10654 
   10655 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10656 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10657 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10658 
   10659 	/*
   10660 	 * Get flow control negotiation result.
   10661 	 */
   10662 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10663 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10664 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10665 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10666 	}
   10667 
   10668 	if (sc->sc_flowflags & IFM_FLOW) {
   10669 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10670 			sc->sc_ctrl |= CTRL_TFCE;
   10671 			sc->sc_fcrtl |= FCRTL_XONE;
   10672 		}
   10673 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10674 			sc->sc_ctrl |= CTRL_RFCE;
   10675 	}
   10676 
   10677 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10678 		DPRINTF(WM_DEBUG_LINK,
   10679 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10680 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10681 	} else {
   10682 		DPRINTF(WM_DEBUG_LINK,
   10683 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10684 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10685 	}
   10686 
   10687 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10688 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10689 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10690 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10691 	if (sc->sc_type == WM_T_80003) {
   10692 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10693 		case IFM_1000_T:
   10694 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10695 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10696 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10697 			break;
   10698 		default:
   10699 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10700 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10701 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10702 			break;
   10703 		}
   10704 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10705 	}
   10706 }
   10707 
/* Kumeran related (80003, ICH* and PCH*) */
   10709 
   10710 /*
   10711  * wm_kmrn_readreg:
   10712  *
   10713  *	Read a kumeran register
   10714  */
   10715 static int
   10716 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10717 {
   10718 	int rv;
   10719 
   10720 	if (sc->sc_type == WM_T_80003)
   10721 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10722 	else
   10723 		rv = sc->phy.acquire(sc);
   10724 	if (rv != 0) {
   10725 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10726 		    __func__);
   10727 		return rv;
   10728 	}
   10729 
   10730 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10731 
   10732 	if (sc->sc_type == WM_T_80003)
   10733 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10734 	else
   10735 		sc->phy.release(sc);
   10736 
   10737 	return rv;
   10738 }
   10739 
   10740 static int
   10741 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10742 {
   10743 
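	/*
	 * Write the register offset with the read-enable bit set; the MAC
	 * fetches the Kumeran register and the data can be read back from
	 * KUMCTRLSTA after a short delay.
	 */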
   10744 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10745 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10746 	    KUMCTRLSTA_REN);
   10747 	CSR_WRITE_FLUSH(sc);
   10748 	delay(2);
   10749 
   10750 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10751 
   10752 	return 0;
   10753 }
   10754 
   10755 /*
   10756  * wm_kmrn_writereg:
   10757  *
   10758  *	Write a kumeran register
   10759  */
   10760 static int
   10761 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10762 {
   10763 	int rv;
   10764 
   10765 	if (sc->sc_type == WM_T_80003)
   10766 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10767 	else
   10768 		rv = sc->phy.acquire(sc);
   10769 	if (rv != 0) {
   10770 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10771 		    __func__);
   10772 		return rv;
   10773 	}
   10774 
   10775 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10776 
   10777 	if (sc->sc_type == WM_T_80003)
   10778 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10779 	else
   10780 		sc->phy.release(sc);
   10781 
   10782 	return rv;
   10783 }
   10784 
   10785 static int
   10786 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10787 {
   10788 
   10789 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10790 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10791 
   10792 	return 0;
   10793 }
   10794 
   10795 /* SGMII related */
   10796 
   10797 /*
   10798  * wm_sgmii_uses_mdio
   10799  *
   10800  * Check whether the transaction is to the internal PHY or the external
   10801  * MDIO interface. Return true if it's MDIO.
   10802  */
   10803 static bool
   10804 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10805 {
   10806 	uint32_t reg;
   10807 	bool ismdio = false;
   10808 
   10809 	switch (sc->sc_type) {
   10810 	case WM_T_82575:
   10811 	case WM_T_82576:
   10812 		reg = CSR_READ(sc, WMREG_MDIC);
   10813 		ismdio = ((reg & MDIC_DEST) != 0);
   10814 		break;
   10815 	case WM_T_82580:
   10816 	case WM_T_I350:
   10817 	case WM_T_I354:
   10818 	case WM_T_I210:
   10819 	case WM_T_I211:
   10820 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10821 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10822 		break;
   10823 	default:
   10824 		break;
   10825 	}
   10826 
   10827 	return ismdio;
   10828 }
   10829 
   10830 /*
   10831  * wm_sgmii_readreg:	[mii interface function]
   10832  *
 *	Read a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10836  */
   10837 static int
   10838 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10839 {
   10840 	struct wm_softc *sc = device_private(dev);
   10841 	uint32_t i2ccmd;
   10842 	int i, rv;
   10843 
   10844 	if (sc->phy.acquire(sc)) {
   10845 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10846 		return 0;
   10847 	}
   10848 
   10849 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10850 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10851 	    | I2CCMD_OPCODE_READ;
   10852 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10853 
   10854 	/* Poll the ready bit */
   10855 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10856 		delay(50);
   10857 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10858 		if (i2ccmd & I2CCMD_READY)
   10859 			break;
   10860 	}
   10861 	if ((i2ccmd & I2CCMD_READY) == 0)
   10862 		device_printf(dev, "I2CCMD Read did not complete\n");
   10863 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10864 		device_printf(dev, "I2CCMD Error bit set\n");
   10865 
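	/* The I2C interface returns the data byte-swapped; swap it back. */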
   10866 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10867 
   10868 	sc->phy.release(sc);
   10869 	return rv;
   10870 }
   10871 
   10872 /*
   10873  * wm_sgmii_writereg:	[mii interface function]
   10874  *
   10875  *	Write a PHY register on the SGMII.
   10876  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10878  */
   10879 static void
   10880 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10881 {
   10882 	struct wm_softc *sc = device_private(dev);
   10883 	uint32_t i2ccmd;
   10884 	int i;
   10885 	int val_swapped;
   10886 
   10887 	if (sc->phy.acquire(sc) != 0) {
   10888 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10889 		return;
   10890 	}
   10891 	/* Swap the data bytes for the I2C interface */
   10892 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10893 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10894 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10895 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10896 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10897 
   10898 	/* Poll the ready bit */
   10899 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10900 		delay(50);
   10901 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10902 		if (i2ccmd & I2CCMD_READY)
   10903 			break;
   10904 	}
   10905 	if ((i2ccmd & I2CCMD_READY) == 0)
   10906 		device_printf(dev, "I2CCMD Write did not complete\n");
   10907 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10908 		device_printf(dev, "I2CCMD Error bit set\n");
   10909 
   10910 	sc->phy.release(sc);
   10911 }
   10912 
   10913 /* TBI related */
   10914 
   10915 /*
   10916  * wm_tbi_mediainit:
   10917  *
   10918  *	Initialize media for use on 1000BASE-X devices.
   10919  */
   10920 static void
   10921 wm_tbi_mediainit(struct wm_softc *sc)
   10922 {
   10923 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10924 	const char *sep = "";
   10925 
   10926 	if (sc->sc_type < WM_T_82543)
   10927 		sc->sc_tipg = TIPG_WM_DFLT;
   10928 	else
   10929 		sc->sc_tipg = TIPG_LG_DFLT;
   10930 
   10931 	sc->sc_tbi_serdes_anegticks = 5;
   10932 
   10933 	/* Initialize our media structures */
   10934 	sc->sc_mii.mii_ifp = ifp;
   10935 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10936 
   10937 	if ((sc->sc_type >= WM_T_82575)
   10938 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10939 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10940 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10941 	else
   10942 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10943 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10944 
   10945 	/*
   10946 	 * SWD Pins:
   10947 	 *
   10948 	 *	0 = Link LED (output)
   10949 	 *	1 = Loss Of Signal (input)
   10950 	 */
   10951 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10952 
   10953 	/* XXX Perhaps this is only for TBI */
   10954 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10955 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10956 
   10957 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10958 		sc->sc_ctrl &= ~CTRL_LRST;
   10959 
   10960 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10961 
   10962 #define	ADD(ss, mm, dd)							\
   10963 do {									\
   10964 	aprint_normal("%s%s", sep, ss);					\
   10965 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10966 	sep = ", ";							\
   10967 } while (/*CONSTCOND*/0)
   10968 
   10969 	aprint_normal_dev(sc->sc_dev, "");
   10970 
   10971 	if (sc->sc_type == WM_T_I354) {
   10972 		uint32_t status;
   10973 
   10974 		status = CSR_READ(sc, WMREG_STATUS);
   10975 		if (((status & STATUS_2P5_SKU) != 0)
   10976 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10980 	} else if (sc->sc_type == WM_T_82545) {
   10981 		/* Only 82545 is LX (XXX except SFP) */
   10982 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10983 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10984 	} else {
   10985 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10986 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10987 	}
   10988 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10989 	aprint_normal("\n");
   10990 
   10991 #undef ADD
   10992 
   10993 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10994 }
   10995 
   10996 /*
   10997  * wm_tbi_mediachange:	[ifmedia interface function]
   10998  *
   10999  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11000  */
   11001 static int
   11002 wm_tbi_mediachange(struct ifnet *ifp)
   11003 {
   11004 	struct wm_softc *sc = ifp->if_softc;
   11005 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11006 	uint32_t status;
   11007 	int i;
   11008 
   11009 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11010 		/* XXX need some work for >= 82571 and < 82575 */
   11011 		if (sc->sc_type < WM_T_82575)
   11012 			return 0;
   11013 	}
   11014 
   11015 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11016 	    || (sc->sc_type >= WM_T_82575))
   11017 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11018 
   11019 	sc->sc_ctrl &= ~CTRL_LRST;
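	/* Build the TXCW autonegotiation advertisement word. */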
   11020 	sc->sc_txcw = TXCW_ANE;
   11021 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11022 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11023 	else if (ife->ifm_media & IFM_FDX)
   11024 		sc->sc_txcw |= TXCW_FD;
   11025 	else
   11026 		sc->sc_txcw |= TXCW_HD;
   11027 
   11028 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11029 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11030 
   11031 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11032 		    device_xname(sc->sc_dev), sc->sc_txcw));
   11033 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11034 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11035 	CSR_WRITE_FLUSH(sc);
   11036 	delay(1000);
   11037 
   11038 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   11039 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   11040 
   11041 	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal, 0 if they don't.
   11044 	 */
   11045 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   11046 		/* Have signal; wait for the link to come up. */
   11047 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11048 			delay(10000);
   11049 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11050 				break;
   11051 		}
   11052 
   11053 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11054 			    device_xname(sc->sc_dev),i));
   11055 
   11056 		status = CSR_READ(sc, WMREG_STATUS);
   11057 		DPRINTF(WM_DEBUG_LINK,
   11058 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11059 			device_xname(sc->sc_dev),status, STATUS_LU));
   11060 		if (status & STATUS_LU) {
   11061 			/* Link is up. */
   11062 			DPRINTF(WM_DEBUG_LINK,
   11063 			    ("%s: LINK: set media -> link up %s\n",
   11064 			    device_xname(sc->sc_dev),
   11065 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   11066 
   11067 			/*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so re-read CTRL into sc->sc_ctrl.
   11070 			 */
   11071 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11072 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11073 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11074 			if (status & STATUS_FD)
   11075 				sc->sc_tctl |=
   11076 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11077 			else
   11078 				sc->sc_tctl |=
   11079 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11080 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11081 				sc->sc_fcrtl |= FCRTL_XONE;
   11082 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11083 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11084 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   11085 				      sc->sc_fcrtl);
   11086 			sc->sc_tbi_linkup = 1;
   11087 		} else {
   11088 			if (i == WM_LINKUP_TIMEOUT)
   11089 				wm_check_for_link(sc);
   11090 			/* Link is down. */
   11091 			DPRINTF(WM_DEBUG_LINK,
   11092 			    ("%s: LINK: set media -> link down\n",
   11093 			    device_xname(sc->sc_dev)));
   11094 			sc->sc_tbi_linkup = 0;
   11095 		}
   11096 	} else {
   11097 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11098 		    device_xname(sc->sc_dev)));
   11099 		sc->sc_tbi_linkup = 0;
   11100 	}
   11101 
   11102 	wm_tbi_serdes_set_linkled(sc);
   11103 
   11104 	return 0;
   11105 }
   11106 
   11107 /*
   11108  * wm_tbi_mediastatus:	[ifmedia interface function]
   11109  *
   11110  *	Get the current interface media status on a 1000BASE-X device.
   11111  */
   11112 static void
   11113 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11114 {
   11115 	struct wm_softc *sc = ifp->if_softc;
   11116 	uint32_t ctrl, status;
   11117 
   11118 	ifmr->ifm_status = IFM_AVALID;
   11119 	ifmr->ifm_active = IFM_ETHER;
   11120 
   11121 	status = CSR_READ(sc, WMREG_STATUS);
   11122 	if ((status & STATUS_LU) == 0) {
   11123 		ifmr->ifm_active |= IFM_NONE;
   11124 		return;
   11125 	}
   11126 
   11127 	ifmr->ifm_status |= IFM_ACTIVE;
   11128 	/* Only 82545 is LX */
   11129 	if (sc->sc_type == WM_T_82545)
   11130 		ifmr->ifm_active |= IFM_1000_LX;
   11131 	else
   11132 		ifmr->ifm_active |= IFM_1000_SX;
   11133 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11134 		ifmr->ifm_active |= IFM_FDX;
   11135 	else
   11136 		ifmr->ifm_active |= IFM_HDX;
   11137 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11138 	if (ctrl & CTRL_RFCE)
   11139 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11140 	if (ctrl & CTRL_TFCE)
   11141 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11142 }
   11143 
   11144 /* XXX TBI only */
   11145 static int
   11146 wm_check_for_link(struct wm_softc *sc)
   11147 {
   11148 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11149 	uint32_t rxcw;
   11150 	uint32_t ctrl;
   11151 	uint32_t status;
   11152 	uint32_t sig;
   11153 
   11154 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11155 		/* XXX need some work for >= 82571 */
   11156 		if (sc->sc_type >= WM_T_82571) {
   11157 			sc->sc_tbi_linkup = 1;
   11158 			return 0;
   11159 		}
   11160 	}
   11161 
   11162 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11163 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11164 	status = CSR_READ(sc, WMREG_STATUS);
   11165 
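	/*
	 * 'sig' is the value CTRL_SWDPIN(1) is expected to read when a
	 * signal is present: 1 on chips newer than the 82544, 0 otherwise.
	 */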
   11166 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11167 
   11168 	DPRINTF(WM_DEBUG_LINK,
   11169 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11170 		device_xname(sc->sc_dev), __func__,
   11171 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11172 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11173 
   11174 	/*
   11175 	 * SWDPIN   LU RXCW
   11176 	 *      0    0    0
   11177 	 *      0    0    1	(should not happen)
   11178 	 *      0    1    0	(should not happen)
   11179 	 *      0    1    1	(should not happen)
   11180 	 *      1    0    0	Disable autonego and force linkup
   11181 	 *      1    0    1	got /C/ but not linkup yet
   11182 	 *      1    1    0	(linkup)
   11183 	 *      1    1    1	If IFM_AUTO, back to autonego
   11184 	 *
   11185 	 */
   11186 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11187 	    && ((status & STATUS_LU) == 0)
   11188 	    && ((rxcw & RXCW_C) == 0)) {
   11189 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11190 			__func__));
   11191 		sc->sc_tbi_linkup = 0;
   11192 		/* Disable auto-negotiation in the TXCW register */
   11193 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11194 
   11195 		/*
   11196 		 * Force link-up and also force full-duplex.
   11197 		 *
		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
		 * automatically, so update sc->sc_ctrl from the value read.
   11200 		 */
   11201 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11202 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11203 	} else if (((status & STATUS_LU) != 0)
   11204 	    && ((rxcw & RXCW_C) != 0)
   11205 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11206 		sc->sc_tbi_linkup = 1;
   11207 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11208 			__func__));
   11209 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11210 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11211 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11212 	    && ((rxcw & RXCW_C) != 0)) {
   11213 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11214 	} else {
   11215 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11216 			status));
   11217 	}
   11218 
   11219 	return 0;
   11220 }
   11221 
   11222 /*
   11223  * wm_tbi_tick:
   11224  *
   11225  *	Check the link on TBI devices.
   11226  *	This function acts as mii_tick().
   11227  */
   11228 static void
   11229 wm_tbi_tick(struct wm_softc *sc)
   11230 {
   11231 	struct mii_data *mii = &sc->sc_mii;
   11232 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11233 	uint32_t status;
   11234 
   11235 	KASSERT(WM_CORE_LOCKED(sc));
   11236 
   11237 	status = CSR_READ(sc, WMREG_STATUS);
   11238 
   11239 	/* XXX is this needed? */
   11240 	(void)CSR_READ(sc, WMREG_RXCW);
   11241 	(void)CSR_READ(sc, WMREG_CTRL);
   11242 
   11243 	/* set link status */
   11244 	if ((status & STATUS_LU) == 0) {
   11245 		DPRINTF(WM_DEBUG_LINK,
   11246 		    ("%s: LINK: checklink -> down\n",
   11247 			device_xname(sc->sc_dev)));
   11248 		sc->sc_tbi_linkup = 0;
   11249 	} else if (sc->sc_tbi_linkup == 0) {
   11250 		DPRINTF(WM_DEBUG_LINK,
   11251 		    ("%s: LINK: checklink -> up %s\n",
   11252 			device_xname(sc->sc_dev),
   11253 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11254 		sc->sc_tbi_linkup = 1;
   11255 		sc->sc_tbi_serdes_ticks = 0;
   11256 	}
   11257 
   11258 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11259 		goto setled;
   11260 
   11261 	if ((status & STATUS_LU) == 0) {
   11262 		sc->sc_tbi_linkup = 0;
   11263 		/* If the timer expired, retry autonegotiation */
   11264 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11265 		    && (++sc->sc_tbi_serdes_ticks
   11266 			>= sc->sc_tbi_serdes_anegticks)) {
   11267 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11268 			sc->sc_tbi_serdes_ticks = 0;
   11269 			/*
   11270 			 * Reset the link, and let autonegotiation do
   11271 			 * its thing
   11272 			 */
   11273 			sc->sc_ctrl |= CTRL_LRST;
   11274 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11275 			CSR_WRITE_FLUSH(sc);
   11276 			delay(1000);
   11277 			sc->sc_ctrl &= ~CTRL_LRST;
   11278 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11279 			CSR_WRITE_FLUSH(sc);
   11280 			delay(1000);
   11281 			CSR_WRITE(sc, WMREG_TXCW,
   11282 			    sc->sc_txcw & ~TXCW_ANE);
   11283 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11284 		}
   11285 	}
   11286 
   11287 setled:
   11288 	wm_tbi_serdes_set_linkled(sc);
   11289 }
   11290 
   11291 /* SERDES related */
   11292 static void
   11293 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11294 {
   11295 	uint32_t reg;
   11296 
   11297 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11298 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11299 		return;
   11300 
   11301 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11302 	reg |= PCS_CFG_PCS_EN;
   11303 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11304 
   11305 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11306 	reg &= ~CTRL_EXT_SWDPIN(3);
   11307 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11308 	CSR_WRITE_FLUSH(sc);
   11309 }
   11310 
   11311 static int
   11312 wm_serdes_mediachange(struct ifnet *ifp)
   11313 {
   11314 	struct wm_softc *sc = ifp->if_softc;
   11315 	bool pcs_autoneg = true; /* XXX */
   11316 	uint32_t ctrl_ext, pcs_lctl, reg;
   11317 
   11318 	/* XXX Currently, this function is not called on 8257[12] */
   11319 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11320 	    || (sc->sc_type >= WM_T_82575))
   11321 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11322 
   11323 	wm_serdes_power_up_link_82575(sc);
   11324 
   11325 	sc->sc_ctrl |= CTRL_SLU;
   11326 
   11327 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11328 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11329 
   11330 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11331 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11332 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11333 	case CTRL_EXT_LINK_MODE_SGMII:
   11334 		pcs_autoneg = true;
   11335 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11336 		break;
   11337 	case CTRL_EXT_LINK_MODE_1000KX:
   11338 		pcs_autoneg = false;
   11339 		/* FALLTHROUGH */
   11340 	default:
   11341 		if ((sc->sc_type == WM_T_82575)
   11342 		    || (sc->sc_type == WM_T_82576)) {
   11343 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11344 				pcs_autoneg = false;
   11345 		}
   11346 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11347 		    | CTRL_FRCFDX;
   11348 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11349 	}
   11350 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11351 
   11352 	if (pcs_autoneg) {
   11353 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11354 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11355 
   11356 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
		/* Advertise both symmetric and asymmetric pause */
		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11359 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11360 	} else
   11361 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11362 
   11363 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11364 
   11366 	return 0;
   11367 }
   11368 
   11369 static void
   11370 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11371 {
   11372 	struct wm_softc *sc = ifp->if_softc;
   11373 	struct mii_data *mii = &sc->sc_mii;
   11374 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11375 	uint32_t pcs_adv, pcs_lpab, reg;
   11376 
   11377 	ifmr->ifm_status = IFM_AVALID;
   11378 	ifmr->ifm_active = IFM_ETHER;
   11379 
   11380 	/* Check PCS */
   11381 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11382 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11383 		ifmr->ifm_active |= IFM_NONE;
   11384 		sc->sc_tbi_linkup = 0;
   11385 		goto setled;
   11386 	}
   11387 
   11388 	sc->sc_tbi_linkup = 1;
   11389 	ifmr->ifm_status |= IFM_ACTIVE;
   11390 	if (sc->sc_type == WM_T_I354) {
   11391 		uint32_t status;
   11392 
   11393 		status = CSR_READ(sc, WMREG_STATUS);
   11394 		if (((status & STATUS_2P5_SKU) != 0)
   11395 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11396 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11397 		} else
   11398 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11399 	} else {
   11400 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11401 		case PCS_LSTS_SPEED_10:
   11402 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11403 			break;
   11404 		case PCS_LSTS_SPEED_100:
   11405 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11406 			break;
   11407 		case PCS_LSTS_SPEED_1000:
   11408 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11409 			break;
   11410 		default:
   11411 			device_printf(sc->sc_dev, "Unknown speed\n");
   11412 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11413 			break;
   11414 		}
   11415 	}
   11416 	if ((reg & PCS_LSTS_FDX) != 0)
   11417 		ifmr->ifm_active |= IFM_FDX;
   11418 	else
   11419 		ifmr->ifm_active |= IFM_HDX;
   11420 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11421 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11422 		/* Check flow */
   11423 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11424 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11425 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11426 			goto setled;
   11427 		}
   11428 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11429 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11430 		DPRINTF(WM_DEBUG_LINK,
   11431 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
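		/*
		 * Standard IEEE 802.3 pause resolution: symmetric pause on
		 * both sides enables TX and RX pause; the asymmetric
		 * combinations below enable TX-only or RX-only pause.
		 */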
   11432 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11433 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11434 			mii->mii_media_active |= IFM_FLOW
   11435 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11436 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11437 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11438 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11439 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11440 			mii->mii_media_active |= IFM_FLOW
   11441 			    | IFM_ETH_TXPAUSE;
   11442 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11443 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11444 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11445 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11446 			mii->mii_media_active |= IFM_FLOW
   11447 			    | IFM_ETH_RXPAUSE;
   11448 		}
   11449 	}
   11450 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11451 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11452 setled:
   11453 	wm_tbi_serdes_set_linkled(sc);
   11454 }
   11455 
   11456 /*
   11457  * wm_serdes_tick:
   11458  *
   11459  *	Check the link on serdes devices.
   11460  */
   11461 static void
   11462 wm_serdes_tick(struct wm_softc *sc)
   11463 {
   11464 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11465 	struct mii_data *mii = &sc->sc_mii;
   11466 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11467 	uint32_t reg;
   11468 
   11469 	KASSERT(WM_CORE_LOCKED(sc));
   11470 
   11471 	mii->mii_media_status = IFM_AVALID;
   11472 	mii->mii_media_active = IFM_ETHER;
   11473 
   11474 	/* Check PCS */
   11475 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11476 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11477 		mii->mii_media_status |= IFM_ACTIVE;
   11478 		sc->sc_tbi_linkup = 1;
   11479 		sc->sc_tbi_serdes_ticks = 0;
   11480 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11481 		if ((reg & PCS_LSTS_FDX) != 0)
   11482 			mii->mii_media_active |= IFM_FDX;
   11483 		else
   11484 			mii->mii_media_active |= IFM_HDX;
   11485 	} else {
		mii->mii_media_active |= IFM_NONE;
   11487 		sc->sc_tbi_linkup = 0;
   11488 		/* If the timer expired, retry autonegotiation */
   11489 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11490 		    && (++sc->sc_tbi_serdes_ticks
   11491 			>= sc->sc_tbi_serdes_anegticks)) {
   11492 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11493 			sc->sc_tbi_serdes_ticks = 0;
   11494 			/* XXX */
   11495 			wm_serdes_mediachange(ifp);
   11496 		}
   11497 	}
   11498 
   11499 	wm_tbi_serdes_set_linkled(sc);
   11500 }
   11501 
   11502 /* SFP related */
   11503 
   11504 static int
   11505 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11506 {
   11507 	uint32_t i2ccmd;
   11508 	int i;
   11509 
   11510 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11511 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11512 
   11513 	/* Poll the ready bit */
   11514 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11515 		delay(50);
   11516 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11517 		if (i2ccmd & I2CCMD_READY)
   11518 			break;
   11519 	}
   11520 	if ((i2ccmd & I2CCMD_READY) == 0)
   11521 		return -1;
   11522 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11523 		return -1;
   11524 
   11525 	*data = i2ccmd & 0x00ff;
   11526 
   11527 	return 0;
   11528 }
   11529 
   11530 static uint32_t
   11531 wm_sfp_get_media_type(struct wm_softc *sc)
   11532 {
   11533 	uint32_t ctrl_ext;
   11534 	uint8_t val = 0;
   11535 	int timeout = 3;
   11536 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11537 	int rv = -1;
   11538 
   11539 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11540 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11541 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11542 	CSR_WRITE_FLUSH(sc);
   11543 
   11544 	/* Read SFP module data */
   11545 	while (timeout) {
   11546 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11547 		if (rv == 0)
   11548 			break;
   11549 		delay(100*1000); /* XXX too big */
   11550 		timeout--;
   11551 	}
   11552 	if (rv != 0)
   11553 		goto out;
   11554 	switch (val) {
   11555 	case SFF_SFP_ID_SFF:
   11556 		aprint_normal_dev(sc->sc_dev,
   11557 		    "Module/Connector soldered to board\n");
   11558 		break;
   11559 	case SFF_SFP_ID_SFP:
   11560 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11561 		break;
   11562 	case SFF_SFP_ID_UNKNOWN:
   11563 		goto out;
   11564 	default:
   11565 		break;
   11566 	}
   11567 
   11568 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11569 	if (rv != 0) {
   11570 		goto out;
   11571 	}
   11572 
   11573 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11574 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11576 		sc->sc_flags |= WM_F_SGMII;
   11577 		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11579 		sc->sc_flags |= WM_F_SGMII;
   11580 		mediatype = WM_MEDIATYPE_SERDES;
   11581 	}
   11582 
   11583 out:
   11584 	/* Restore I2C interface setting */
   11585 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11586 
   11587 	return mediatype;
   11588 }
   11589 
   11590 /*
   11591  * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
   11593  */
   11594 
/* Both SPI and Microwire */
   11596 
   11597 /*
   11598  * wm_eeprom_sendbits:
   11599  *
   11600  *	Send a series of bits to the EEPROM.
   11601  */
   11602 static void
   11603 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11604 {
   11605 	uint32_t reg;
   11606 	int x;
   11607 
   11608 	reg = CSR_READ(sc, WMREG_EECD);
   11609 
   11610 	for (x = nbits; x > 0; x--) {
   11611 		if (bits & (1U << (x - 1)))
   11612 			reg |= EECD_DI;
   11613 		else
   11614 			reg &= ~EECD_DI;
   11615 		CSR_WRITE(sc, WMREG_EECD, reg);
   11616 		CSR_WRITE_FLUSH(sc);
   11617 		delay(2);
   11618 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11619 		CSR_WRITE_FLUSH(sc);
   11620 		delay(2);
   11621 		CSR_WRITE(sc, WMREG_EECD, reg);
   11622 		CSR_WRITE_FLUSH(sc);
   11623 		delay(2);
   11624 	}
   11625 }
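
/*
 * Note that wm_eeprom_sendbits() shifts MSB first, pulsing EECD_SK once
 * per bit; e.g. wm_eeprom_sendbits(sc, 0x6, 3) drives DI high, high,
 * then low on three successive clock pulses.
 */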
   11626 
   11627 /*
   11628  * wm_eeprom_recvbits:
   11629  *
   11630  *	Receive a series of bits from the EEPROM.
   11631  */
   11632 static void
   11633 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11634 {
   11635 	uint32_t reg, val;
   11636 	int x;
   11637 
   11638 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11639 
   11640 	val = 0;
   11641 	for (x = nbits; x > 0; x--) {
   11642 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11643 		CSR_WRITE_FLUSH(sc);
   11644 		delay(2);
   11645 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11646 			val |= (1U << (x - 1));
   11647 		CSR_WRITE(sc, WMREG_EECD, reg);
   11648 		CSR_WRITE_FLUSH(sc);
   11649 		delay(2);
   11650 	}
   11651 	*valp = val;
   11652 }
   11653 
   11654 /* Microwire */
   11655 
   11656 /*
   11657  * wm_nvm_read_uwire:
   11658  *
   11659  *	Read a word from the EEPROM using the MicroWire protocol.
   11660  */
   11661 static int
   11662 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11663 {
   11664 	uint32_t reg, val;
   11665 	int i;
   11666 
   11667 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11668 		device_xname(sc->sc_dev), __func__));
   11669 
   11670 	if (sc->nvm.acquire(sc) != 0)
   11671 		return -1;
   11672 
   11673 	for (i = 0; i < wordcnt; i++) {
   11674 		/* Clear SK and DI. */
   11675 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11676 		CSR_WRITE(sc, WMREG_EECD, reg);
   11677 
   11678 		/*
   11679 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11680 		 * and Xen.
   11681 		 *
   11682 		 * We use this workaround only for 82540 because qemu's
   11683 		 * e1000 act as 82540.
   11684 		 */
   11685 		if (sc->sc_type == WM_T_82540) {
   11686 			reg |= EECD_SK;
   11687 			CSR_WRITE(sc, WMREG_EECD, reg);
   11688 			reg &= ~EECD_SK;
   11689 			CSR_WRITE(sc, WMREG_EECD, reg);
   11690 			CSR_WRITE_FLUSH(sc);
   11691 			delay(2);
   11692 		}
   11693 		/* XXX: end of workaround */
   11694 
   11695 		/* Set CHIP SELECT. */
   11696 		reg |= EECD_CS;
   11697 		CSR_WRITE(sc, WMREG_EECD, reg);
   11698 		CSR_WRITE_FLUSH(sc);
   11699 		delay(2);
   11700 
   11701 		/* Shift in the READ command. */
   11702 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11703 
   11704 		/* Shift in address. */
   11705 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11706 
   11707 		/* Shift out the data. */
   11708 		wm_eeprom_recvbits(sc, &val, 16);
   11709 		data[i] = val & 0xffff;
   11710 
   11711 		/* Clear CHIP SELECT. */
   11712 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11713 		CSR_WRITE(sc, WMREG_EECD, reg);
   11714 		CSR_WRITE_FLUSH(sc);
   11715 		delay(2);
   11716 	}
   11717 
   11718 	sc->nvm.release(sc);
   11719 	return 0;
   11720 }
   11721 
   11722 /* SPI */
   11723 
   11724 /*
   11725  * Set SPI and FLASH related information from the EECD register.
   11726  * For 82541 and 82547, the word size is taken from EEPROM.
   11727  */
   11728 static int
   11729 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11730 {
   11731 	int size;
   11732 	uint32_t reg;
	uint16_t data = 0;
   11734 
   11735 	reg = CSR_READ(sc, WMREG_EECD);
   11736 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11737 
   11738 	/* Read the size of NVM from EECD by default */
   11739 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11740 	switch (sc->sc_type) {
   11741 	case WM_T_82541:
   11742 	case WM_T_82541_2:
   11743 	case WM_T_82547:
   11744 	case WM_T_82547_2:
   11745 		/* Set dummy value to access EEPROM */
   11746 		sc->sc_nvm_wordsize = 64;
   11747 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11748 			aprint_error_dev(sc->sc_dev,
   11749 			    "%s: failed to read EEPROM size\n", __func__);
   11750 		}
   11751 		reg = data;
   11752 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11753 		if (size == 0)
   11754 			size = 6; /* 64 word size */
   11755 		else
   11756 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11757 		break;
   11758 	case WM_T_80003:
   11759 	case WM_T_82571:
   11760 	case WM_T_82572:
   11761 	case WM_T_82573: /* SPI case */
   11762 	case WM_T_82574: /* SPI case */
   11763 	case WM_T_82583: /* SPI case */
   11764 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11765 		if (size > 14)
   11766 			size = 14;
   11767 		break;
   11768 	case WM_T_82575:
   11769 	case WM_T_82576:
   11770 	case WM_T_82580:
   11771 	case WM_T_I350:
   11772 	case WM_T_I354:
   11773 	case WM_T_I210:
   11774 	case WM_T_I211:
   11775 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11776 		if (size > 15)
   11777 			size = 15;
   11778 		break;
   11779 	default:
   11780 		aprint_error_dev(sc->sc_dev,
   11781 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   11784 	}
   11785 
   11786 	sc->sc_nvm_wordsize = 1 << size;
   11787 
   11788 	return 0;
   11789 }
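
/*
 * Worked example: assuming the usual NVM_WORD_SIZE_BASE_SHIFT of 6, an
 * EECD size field of 2 gives size = 2 + 6 = 8, so sc_nvm_wordsize
 * becomes 1 << 8 = 256 words.
 */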
   11790 
   11791 /*
   11792  * wm_nvm_ready_spi:
   11793  *
   11794  *	Wait for a SPI EEPROM to be ready for commands.
   11795  */
   11796 static int
   11797 wm_nvm_ready_spi(struct wm_softc *sc)
   11798 {
   11799 	uint32_t val;
   11800 	int usec;
   11801 
   11802 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11803 		device_xname(sc->sc_dev), __func__));
   11804 
   11805 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11806 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11807 		wm_eeprom_recvbits(sc, &val, 8);
   11808 		if ((val & SPI_SR_RDY) == 0)
   11809 			break;
   11810 	}
   11811 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   11813 		return -1;
   11814 	}
   11815 	return 0;
   11816 }
   11817 
   11818 /*
   11819  * wm_nvm_read_spi:
   11820  *
 *	Read a word from the EEPROM using the SPI protocol.
   11822  */
   11823 static int
   11824 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11825 {
   11826 	uint32_t reg, val;
   11827 	int i;
   11828 	uint8_t opc;
   11829 	int rv = 0;
   11830 
   11831 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11832 		device_xname(sc->sc_dev), __func__));
   11833 
   11834 	if (sc->nvm.acquire(sc) != 0)
   11835 		return -1;
   11836 
   11837 	/* Clear SK and CS. */
   11838 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11839 	CSR_WRITE(sc, WMREG_EECD, reg);
   11840 	CSR_WRITE_FLUSH(sc);
   11841 	delay(2);
   11842 
   11843 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11844 		goto out;
   11845 
   11846 	/* Toggle CS to flush commands. */
   11847 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11848 	CSR_WRITE_FLUSH(sc);
   11849 	delay(2);
   11850 	CSR_WRITE(sc, WMREG_EECD, reg);
   11851 	CSR_WRITE_FLUSH(sc);
   11852 	delay(2);
   11853 
   11854 	opc = SPI_OPC_READ;
   11855 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11856 		opc |= SPI_OPC_A8;
   11857 
   11858 	wm_eeprom_sendbits(sc, opc, 8);
   11859 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11860 
   11861 	for (i = 0; i < wordcnt; i++) {
   11862 		wm_eeprom_recvbits(sc, &val, 16);
   11863 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11864 	}
   11865 
   11866 	/* Raise CS and clear SK. */
   11867 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11868 	CSR_WRITE(sc, WMREG_EECD, reg);
   11869 	CSR_WRITE_FLUSH(sc);
   11870 	delay(2);
   11871 
   11872 out:
   11873 	sc->nvm.release(sc);
   11874 	return rv;
   11875 }
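
/*
 * A note on SPI addressing: the byte address is the word index doubled
 * (word << 1). Parts with 8-bit addressing need a ninth address bit for
 * words at and above 128, which is carried in the opcode as SPI_OPC_A8;
 * e.g. word 130 is byte address 0x104, sent as SPI_OPC_A8 in the opcode
 * plus the low eight address bits (0x04).
 */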
   11876 
/* Reading via the EERD register */
   11878 
   11879 static int
   11880 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11881 {
   11882 	uint32_t attempts = 100000;
   11883 	uint32_t i, reg = 0;
   11884 	int32_t done = -1;
   11885 
   11886 	for (i = 0; i < attempts; i++) {
   11887 		reg = CSR_READ(sc, rw);
   11888 
   11889 		if (reg & EERD_DONE) {
   11890 			done = 0;
   11891 			break;
   11892 		}
   11893 		delay(5);
   11894 	}
   11895 
   11896 	return done;
   11897 }
   11898 
   11899 static int
   11900 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11901     uint16_t *data)
   11902 {
   11903 	int i, eerd = 0;
   11904 	int rv = 0;
   11905 
   11906 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11907 		device_xname(sc->sc_dev), __func__));
   11908 
   11909 	if (sc->nvm.acquire(sc) != 0)
   11910 		return -1;
   11911 
   11912 	for (i = 0; i < wordcnt; i++) {
   11913 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11914 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11915 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11916 		if (rv != 0) {
   11917 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   11918 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   11919 			break;
   11920 		}
   11921 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11922 	}
   11923 
   11924 	sc->nvm.release(sc);
   11925 	return rv;
   11926 }
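
/*
 * For reference, a single EERD read of word n boils down to the
 * following sketch (error handling omitted):
 *
 *	CSR_WRITE(sc, WMREG_EERD, (n << EERD_ADDR_SHIFT) | EERD_START);
 *	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
 *		delay(5);
 *	data = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */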
   11927 
   11928 /* Flash */
   11929 
   11930 static int
   11931 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11932 {
   11933 	uint32_t eecd;
   11934 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11935 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11936 	uint8_t sig_byte = 0;
   11937 
   11938 	switch (sc->sc_type) {
   11939 	case WM_T_PCH_SPT:
   11940 		/*
   11941 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11942 		 * sector valid bits from the NVM.
   11943 		 */
   11944 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11945 		if ((*bank == 0) || (*bank == 1)) {
   11946 			aprint_error_dev(sc->sc_dev,
   11947 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11948 				*bank);
   11949 			return -1;
   11950 		} else {
   11951 			*bank = *bank - 2;
   11952 			return 0;
   11953 		}
   11954 	case WM_T_ICH8:
   11955 	case WM_T_ICH9:
   11956 		eecd = CSR_READ(sc, WMREG_EECD);
   11957 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11958 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11959 			return 0;
   11960 		}
   11961 		/* FALLTHROUGH */
   11962 	default:
   11963 		/* Default to 0 */
   11964 		*bank = 0;
   11965 
   11966 		/* Check bank 0 */
   11967 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11968 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11969 			*bank = 0;
   11970 			return 0;
   11971 		}
   11972 
   11973 		/* Check bank 1 */
   11974 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11975 		    &sig_byte);
   11976 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11977 			*bank = 1;
   11978 			return 0;
   11979 		}
   11980 	}
   11981 
   11982 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11983 		device_xname(sc->sc_dev)));
   11984 	return -1;
   11985 }
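
/*
 * On PCH_SPT the CTRL_EXT_NVMVS bits encode the valid bank directly:
 * 0 and 1 mean no valid bank, 2 selects bank 0 and 3 selects bank 1
 * (hence the "*bank - 2" above). Older parts instead read a signature
 * byte from each bank and pick the one matching ICH_NVM_SIG_VALUE.
 */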
   11986 
   11987 /******************************************************************************
   11988  * This function does initial flash setup so that a new read/write/erase cycle
   11989  * can be started.
   11990  *
   11991  * sc - The pointer to the hw structure
   11992  ****************************************************************************/
   11993 static int32_t
   11994 wm_ich8_cycle_init(struct wm_softc *sc)
   11995 {
   11996 	uint16_t hsfsts;
   11997 	int32_t error = 1;
   11998 	int32_t i     = 0;
   11999 
   12000 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12001 
	/* Check the Flash Descriptor Valid bit in HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
   12006 
	/* Clear FCERR and DAEL in HW status by writing 1s */
   12009 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12010 
   12011 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12012 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be set to 1 by a hardware reset so that it can be used
	 * to tell whether a cycle is in progress or has completed.  We
	 * should also have a software semaphore guarding FDONE or the
	 * in-progress bit so that two threads cannot start a cycle at
	 * the same time.
	 */
   12023 
   12024 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12025 		/*
   12026 		 * There is no cycle running at present, so we can start a
   12027 		 * cycle
   12028 		 */
   12029 
   12030 		/* Begin by setting Flash Cycle Done. */
   12031 		hsfsts |= HSFSTS_DONE;
   12032 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12033 		error = 0;
   12034 	} else {
   12035 		/*
		 * Otherwise, poll for a while so that the current cycle has
		 * a chance to end before giving up.
   12038 		 */
   12039 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12040 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12041 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12042 				error = 0;
   12043 				break;
   12044 			}
   12045 			delay(1);
   12046 		}
   12047 		if (error == 0) {
   12048 			/*
			 * The previous cycle ended within the timeout, so
			 * now set the Flash Cycle Done.
   12051 			 */
   12052 			hsfsts |= HSFSTS_DONE;
   12053 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12054 		}
   12055 	}
   12056 	return error;
   12057 }
   12058 
   12059 /******************************************************************************
   12060  * This function starts a flash cycle and waits for its completion
   12061  *
   12062  * sc - The pointer to the hw structure
   12063  ****************************************************************************/
   12064 static int32_t
   12065 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12066 {
   12067 	uint16_t hsflctl;
   12068 	uint16_t hsfsts;
   12069 	int32_t error = 1;
   12070 	uint32_t i = 0;
   12071 
   12072 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12073 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12074 	hsflctl |= HSFCTL_GO;
   12075 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12076 
   12077 	/* Wait till FDONE bit is set to 1 */
   12078 	do {
   12079 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12080 		if (hsfsts & HSFSTS_DONE)
   12081 			break;
   12082 		delay(1);
   12083 		i++;
   12084 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12086 		error = 0;
   12087 
   12088 	return error;
   12089 }
   12090 
   12091 /******************************************************************************
   12092  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12093  *
   12094  * sc - The pointer to the hw structure
   12095  * index - The index of the byte or word to read.
   12096  * size - Size of data to read, 1=byte 2=word, 4=dword
   12097  * data - Pointer to the word to store the value read.
   12098  *****************************************************************************/
   12099 static int32_t
   12100 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12101     uint32_t size, uint32_t *data)
   12102 {
   12103 	uint16_t hsfsts;
   12104 	uint16_t hsflctl;
   12105 	uint32_t flash_linear_address;
   12106 	uint32_t flash_data = 0;
   12107 	int32_t error = 1;
   12108 	int32_t count = 0;
   12109 
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12112 		return error;
   12113 
   12114 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12115 	    sc->sc_ich8_flash_base;
   12116 
   12117 	do {
   12118 		delay(1);
   12119 		/* Steps */
   12120 		error = wm_ich8_cycle_init(sc);
   12121 		if (error)
   12122 			break;
   12123 
   12124 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* BCOUNT is the transfer size minus one (0 = 1 byte). */
   12126 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12127 		    & HSFCTL_BCOUNT_MASK;
   12128 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12129 		if (sc->sc_type == WM_T_PCH_SPT) {
   12130 			/*
			 * In SPT, this register is in LAN memory space, not
			 * Flash.  Therefore, only 32-bit access is supported.
   12133 			 */
   12134 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   12135 			    (uint32_t)hsflctl);
   12136 		} else
   12137 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12138 
		/*
		 * Write the last 24 bits of the index into the Flash Linear
		 * Address field of the Flash Address register.
		 * TODO: maybe check the index against the flash size.
		 */
   12144 
   12145 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12146 
   12147 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12148 
		/*
		 * If FCERR is set, clear it and retry the whole sequence up
		 * to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read the
		 * result from Flash Data0, least significant byte first.
		 */
   12155 		if (error == 0) {
   12156 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12157 			if (size == 1)
   12158 				*data = (uint8_t)(flash_data & 0x000000FF);
   12159 			else if (size == 2)
   12160 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12161 			else if (size == 4)
   12162 				*data = (uint32_t)flash_data;
   12163 			break;
   12164 		} else {
   12165 			/*
   12166 			 * If we've gotten here, then things are probably
   12167 			 * completely hosed, but if the error condition is
   12168 			 * detected, it won't hurt to give it another try...
   12169 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12170 			 */
   12171 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12172 			if (hsfsts & HSFSTS_ERR) {
   12173 				/* Repeat for some time before giving up. */
   12174 				continue;
   12175 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12176 				break;
   12177 		}
   12178 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12179 
   12180 	return error;
   12181 }
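
/*
 * Worked example for the routine above: a 2-byte read of byte index
 * 0x10 programs BCOUNT = size - 1 = 1 and ICH_CYCLE_READ into HSFCTL,
 * writes sc_ich8_flash_base + 0x10 into FADDR, sets HSFCTL_GO, and on
 * success takes the low 16 bits of FDATA0.
 */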
   12182 
   12183 /******************************************************************************
   12184  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12185  *
   12186  * sc - pointer to wm_hw structure
   12187  * index - The index of the byte to read.
   12188  * data - Pointer to a byte to store the value read.
   12189  *****************************************************************************/
   12190 static int32_t
   12191 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12192 {
   12193 	int32_t status;
   12194 	uint32_t word = 0;
   12195 
   12196 	status = wm_read_ich8_data(sc, index, 1, &word);
   12197 	if (status == 0)
   12198 		*data = (uint8_t)word;
   12199 	else
   12200 		*data = 0;
   12201 
   12202 	return status;
   12203 }
   12204 
   12205 /******************************************************************************
   12206  * Reads a word from the NVM using the ICH8 flash access registers.
   12207  *
   12208  * sc - pointer to wm_hw structure
   12209  * index - The starting byte index of the word to read.
   12210  * data - Pointer to a word to store the value read.
   12211  *****************************************************************************/
   12212 static int32_t
   12213 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12214 {
   12215 	int32_t status;
   12216 	uint32_t word = 0;
   12217 
   12218 	status = wm_read_ich8_data(sc, index, 2, &word);
   12219 	if (status == 0)
   12220 		*data = (uint16_t)word;
   12221 	else
   12222 		*data = 0;
   12223 
   12224 	return status;
   12225 }
   12226 
   12227 /******************************************************************************
   12228  * Reads a dword from the NVM using the ICH8 flash access registers.
   12229  *
   12230  * sc - pointer to wm_hw structure
   12231  * index - The starting byte index of the word to read.
   12232  * data - Pointer to a word to store the value read.
   12233  *****************************************************************************/
   12234 static int32_t
   12235 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12236 {
   12237 	int32_t status;
   12238 
   12239 	status = wm_read_ich8_data(sc, index, 4, data);
   12240 	return status;
   12241 }
   12242 
   12243 /******************************************************************************
   12244  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12245  * register.
   12246  *
   12247  * sc - Struct containing variables accessed by shared code
   12248  * offset - offset of word in the EEPROM to read
   12249  * data - word read from the EEPROM
   12250  * words - number of words to read
   12251  *****************************************************************************/
   12252 static int
   12253 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12254 {
   12255 	int32_t  rv = 0;
   12256 	uint32_t flash_bank = 0;
   12257 	uint32_t act_offset = 0;
   12258 	uint32_t bank_offset = 0;
   12259 	uint16_t word = 0;
   12260 	uint16_t i = 0;
   12261 
   12262 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12263 		device_xname(sc->sc_dev), __func__));
   12264 
   12265 	if (sc->nvm.acquire(sc) != 0)
   12266 		return -1;
   12267 
   12268 	/*
   12269 	 * We need to know which is the valid flash bank.  In the event
   12270 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12271 	 * managing flash_bank.  So it cannot be trusted and needs
   12272 	 * to be updated with each read.
   12273 	 */
   12274 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12275 	if (rv) {
   12276 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12277 			device_xname(sc->sc_dev)));
   12278 		flash_bank = 0;
   12279 	}
   12280 
   12281 	/*
   12282 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12283 	 * size
   12284 	 */
   12285 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12286 
   12287 	for (i = 0; i < words; i++) {
   12288 		/* The NVM part needs a byte offset, hence * 2 */
   12289 		act_offset = bank_offset + ((offset + i) * 2);
   12290 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12291 		if (rv) {
   12292 			aprint_error_dev(sc->sc_dev,
   12293 			    "%s: failed to read NVM\n", __func__);
   12294 			break;
   12295 		}
   12296 		data[i] = word;
   12297 	}
   12298 
   12299 	sc->nvm.release(sc);
   12300 	return rv;
   12301 }
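
/*
 * Worked example for the offset math above: with a flash bank of 0x400
 * words, word 0x0f in bank 1 is at byte offset
 * 0x400 * 2 + 0x0f * 2 = 0x81e.
 */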
   12302 
   12303 /******************************************************************************
   12304  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12305  * register.
   12306  *
   12307  * sc - Struct containing variables accessed by shared code
   12308  * offset - offset of word in the EEPROM to read
   12309  * data - word read from the EEPROM
   12310  * words - number of words to read
   12311  *****************************************************************************/
   12312 static int
   12313 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12314 {
   12315 	int32_t  rv = 0;
   12316 	uint32_t flash_bank = 0;
   12317 	uint32_t act_offset = 0;
   12318 	uint32_t bank_offset = 0;
   12319 	uint32_t dword = 0;
   12320 	uint16_t i = 0;
   12321 
   12322 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12323 		device_xname(sc->sc_dev), __func__));
   12324 
   12325 	if (sc->nvm.acquire(sc) != 0)
   12326 		return -1;
   12327 
   12328 	/*
   12329 	 * We need to know which is the valid flash bank.  In the event
   12330 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12331 	 * managing flash_bank.  So it cannot be trusted and needs
   12332 	 * to be updated with each read.
   12333 	 */
   12334 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12335 	if (rv) {
   12336 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12337 			device_xname(sc->sc_dev)));
   12338 		flash_bank = 0;
   12339 	}
   12340 
   12341 	/*
   12342 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12343 	 * size
   12344 	 */
   12345 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12346 
   12347 	for (i = 0; i < words; i++) {
   12348 		/* The NVM part needs a byte offset, hence * 2 */
   12349 		act_offset = bank_offset + ((offset + i) * 2);
   12350 		/* but we must read dword aligned, so mask ... */
   12351 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12352 		if (rv) {
   12353 			aprint_error_dev(sc->sc_dev,
   12354 			    "%s: failed to read NVM\n", __func__);
   12355 			break;
   12356 		}
   12357 		/* ... and pick out low or high word */
   12358 		if ((act_offset & 0x2) == 0)
   12359 			data[i] = (uint16_t)(dword & 0xFFFF);
   12360 		else
   12361 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12362 	}
   12363 
   12364 	sc->nvm.release(sc);
   12365 	return rv;
   12366 }
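
/*
 * Worked example for the alignment logic above: word byte offset 0x12
 * is read as the dword at 0x10; since (0x12 & 0x2) != 0, the word is
 * taken from bits 31:16 of that dword.
 */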
   12367 
   12368 /* iNVM */
   12369 
   12370 static int
   12371 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12372 {
   12373 	int32_t  rv = 0;
   12374 	uint32_t invm_dword;
   12375 	uint16_t i;
   12376 	uint8_t record_type, word_address;
   12377 
   12378 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12379 		device_xname(sc->sc_dev), __func__));
   12380 
   12381 	for (i = 0; i < INVM_SIZE; i++) {
   12382 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12383 		/* Get record type */
   12384 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12385 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12386 			break;
   12387 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12388 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12389 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12390 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12391 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12392 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12393 			if (word_address == address) {
   12394 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12395 				rv = 0;
   12396 				break;
   12397 			}
   12398 		}
   12399 	}
   12400 
   12401 	return rv;
   12402 }
   12403 
   12404 static int
   12405 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12406 {
   12407 	int rv = 0;
   12408 	int i;
   12409 
   12410 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12411 		device_xname(sc->sc_dev), __func__));
   12412 
   12413 	if (sc->nvm.acquire(sc) != 0)
   12414 		return -1;
   12415 
   12416 	for (i = 0; i < words; i++) {
   12417 		switch (offset + i) {
   12418 		case NVM_OFF_MACADDR:
   12419 		case NVM_OFF_MACADDR1:
   12420 		case NVM_OFF_MACADDR2:
   12421 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12422 			if (rv != 0) {
   12423 				data[i] = 0xffff;
   12424 				rv = -1;
   12425 			}
   12426 			break;
   12427 		case NVM_OFF_CFG2:
   12428 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12429 			if (rv != 0) {
   12430 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12431 				rv = 0;
   12432 			}
   12433 			break;
   12434 		case NVM_OFF_CFG4:
   12435 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12436 			if (rv != 0) {
   12437 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12438 				rv = 0;
   12439 			}
   12440 			break;
   12441 		case NVM_OFF_LED_1_CFG:
   12442 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12443 			if (rv != 0) {
   12444 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12445 				rv = 0;
   12446 			}
   12447 			break;
   12448 		case NVM_OFF_LED_0_2_CFG:
   12449 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12450 			if (rv != 0) {
   12451 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12452 				rv = 0;
   12453 			}
   12454 			break;
   12455 		case NVM_OFF_ID_LED_SETTINGS:
   12456 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12457 			if (rv != 0) {
   12458 				*data = ID_LED_RESERVED_FFFF;
   12459 				rv = 0;
   12460 			}
   12461 			break;
   12462 		default:
   12463 			DPRINTF(WM_DEBUG_NVM,
   12464 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12465 			*data = NVM_RESERVED_WORD;
   12466 			break;
   12467 		}
   12468 	}
   12469 
   12470 	sc->nvm.release(sc);
   12471 	return rv;
   12472 }
   12473 
   12474 /* Lock, detecting NVM type, validate checksum, version and read */
   12475 
   12476 static int
   12477 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12478 {
   12479 	uint32_t eecd = 0;
   12480 
   12481 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12482 	    || sc->sc_type == WM_T_82583) {
   12483 		eecd = CSR_READ(sc, WMREG_EECD);
   12484 
   12485 		/* Isolate bits 15 & 16 */
   12486 		eecd = ((eecd >> 15) & 0x03);
   12487 
   12488 		/* If both bits are set, device is Flash type */
   12489 		if (eecd == 0x03)
   12490 			return 0;
   12491 	}
   12492 	return 1;
   12493 }
   12494 
   12495 static int
   12496 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12497 {
   12498 	uint32_t eec;
   12499 
   12500 	eec = CSR_READ(sc, WMREG_EEC);
   12501 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12502 		return 1;
   12503 
   12504 	return 0;
   12505 }
   12506 
   12507 /*
   12508  * wm_nvm_validate_checksum
   12509  *
   12510  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12511  */
   12512 static int
   12513 wm_nvm_validate_checksum(struct wm_softc *sc)
   12514 {
   12515 	uint16_t checksum;
   12516 	uint16_t eeprom_data;
   12517 #ifdef WM_DEBUG
   12518 	uint16_t csum_wordaddr, valid_checksum;
   12519 #endif
   12520 	int i;
   12521 
   12522 	checksum = 0;
   12523 
   12524 	/* Don't check for I211 */
   12525 	if (sc->sc_type == WM_T_I211)
   12526 		return 0;
   12527 
   12528 #ifdef WM_DEBUG
   12529 	if (sc->sc_type == WM_T_PCH_LPT) {
   12530 		csum_wordaddr = NVM_OFF_COMPAT;
   12531 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12532 	} else {
   12533 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12534 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12535 	}
   12536 
   12537 	/* Dump EEPROM image for debug */
   12538 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12539 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12540 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12541 		/* XXX PCH_SPT? */
   12542 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12543 		if ((eeprom_data & valid_checksum) == 0) {
   12544 			DPRINTF(WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12546 				device_xname(sc->sc_dev), eeprom_data,
   12547 				    valid_checksum));
   12548 		}
   12549 	}
   12550 
   12551 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12552 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12553 		for (i = 0; i < NVM_SIZE; i++) {
   12554 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12555 				printf("XXXX ");
   12556 			else
   12557 				printf("%04hx ", eeprom_data);
   12558 			if (i % 8 == 7)
   12559 				printf("\n");
   12560 		}
   12561 	}
   12562 
   12563 #endif /* WM_DEBUG */
   12564 
   12565 	for (i = 0; i < NVM_SIZE; i++) {
   12566 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12567 			return 1;
   12568 		checksum += eeprom_data;
   12569 	}
   12570 
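	/* A mismatch is only reported (in debug builds); it is not fatal */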
   12571 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12572 #ifdef WM_DEBUG
   12573 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12574 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12575 #endif
   12576 	}
   12577 
   12578 	return 0;
   12579 }
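
/*
 * Illustration only (not driver code): the equivalent check on a raw
 * 64-word image, assuming the usual target value of 0xBABA for
 * NVM_CHECKSUM:
 *
 *	uint16_t sum = 0;
 *	for (int i = 0; i < 64; i++)
 *		sum += image[i];
 *	valid = (sum == 0xBABA);
 *
 * The checksum word itself is chosen so that the 16-bit sum wraps
 * around to the target.
 */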
   12580 
   12581 static void
   12582 wm_nvm_version_invm(struct wm_softc *sc)
   12583 {
   12584 	uint32_t dword;
   12585 
   12586 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.  Perhaps it's not perfect though...
   12590 	 *
   12591 	 * Example:
   12592 	 *
   12593 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12594 	 */
   12595 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12596 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12597 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12598 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12599 }
   12600 
   12601 static void
   12602 wm_nvm_version(struct wm_softc *sc)
   12603 {
   12604 	uint16_t major, minor, build, patch;
   12605 	uint16_t uid0, uid1;
   12606 	uint16_t nvm_data;
   12607 	uint16_t off;
   12608 	bool check_version = false;
   12609 	bool check_optionrom = false;
   12610 	bool have_build = false;
   12611 	bool have_uid = true;
   12612 
   12613 	/*
   12614 	 * Version format:
   12615 	 *
   12616 	 * XYYZ
   12617 	 * X0YZ
   12618 	 * X0YY
   12619 	 *
   12620 	 * Example:
   12621 	 *
   12622 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12623 	 *	82571	0x50a6	5.10.6?
   12624 	 *	82572	0x506a	5.6.10?
   12625 	 *	82572EI	0x5069	5.6.9?
   12626 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12627 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12629 	 */
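
/*
 * The minor field is stored BCD-like and converted to decimal below;
 * e.g. for 0x50a2: major = 0x5, minor = 0x0a -> (10 / 16) * 10 +
 * (10 % 16) = 10, build = 0x2, giving "5.10.2".
 */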
   12630 
   12631 	/*
   12632 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12635 	 */
   12636 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12637 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12638 		have_uid = false;
   12639 
   12640 	switch (sc->sc_type) {
   12641 	case WM_T_82571:
   12642 	case WM_T_82572:
   12643 	case WM_T_82574:
   12644 	case WM_T_82583:
   12645 		check_version = true;
   12646 		check_optionrom = true;
   12647 		have_build = true;
   12648 		break;
   12649 	case WM_T_82575:
   12650 	case WM_T_82576:
   12651 	case WM_T_82580:
   12652 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12653 			check_version = true;
   12654 		break;
   12655 	case WM_T_I211:
   12656 		wm_nvm_version_invm(sc);
   12657 		have_uid = false;
   12658 		goto printver;
   12659 	case WM_T_I210:
   12660 		if (!wm_nvm_flash_presence_i210(sc)) {
   12661 			wm_nvm_version_invm(sc);
   12662 			have_uid = false;
   12663 			goto printver;
   12664 		}
   12665 		/* FALLTHROUGH */
   12666 	case WM_T_I350:
   12667 	case WM_T_I354:
   12668 		check_version = true;
   12669 		check_optionrom = true;
   12670 		break;
   12671 	default:
   12672 		return;
   12673 	}
   12674 	if (check_version
   12675 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12676 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12677 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12678 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12679 			build = nvm_data & NVM_BUILD_MASK;
   12680 			have_build = true;
   12681 		} else
   12682 			minor = nvm_data & 0x00ff;
   12683 
   12684 		/* Decimal */
   12685 		minor = (minor / 16) * 10 + (minor % 16);
   12686 		sc->sc_nvm_ver_major = major;
   12687 		sc->sc_nvm_ver_minor = minor;
   12688 
   12689 printver:
   12690 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12691 		    sc->sc_nvm_ver_minor);
   12692 		if (have_build) {
   12693 			sc->sc_nvm_ver_build = build;
   12694 			aprint_verbose(".%d", build);
   12695 		}
   12696 	}
   12697 
	/* Assume the option ROM area is above NVM_SIZE */
   12699 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12700 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12701 		/* Option ROM Version */
   12702 		if ((off != 0x0000) && (off != 0xffff)) {
   12703 			int rv;
   12704 
   12705 			off += NVM_COMBO_VER_OFF;
   12706 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12707 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12708 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12709 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12710 				/* 16bits */
   12711 				major = uid0 >> 8;
   12712 				build = (uid0 << 8) | (uid1 >> 8);
   12713 				patch = uid1 & 0x00ff;
   12714 				aprint_verbose(", option ROM Version %d.%d.%d",
   12715 				    major, build, patch);
   12716 			}
   12717 		}
   12718 	}
   12719 
   12720 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12721 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12722 }
   12723 
   12724 /*
   12725  * wm_nvm_read:
   12726  *
   12727  *	Read data from the serial EEPROM.
   12728  */
   12729 static int
   12730 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12731 {
   12732 	int rv;
   12733 
   12734 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12735 		device_xname(sc->sc_dev), __func__));
   12736 
   12737 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12738 		return -1;
   12739 
   12740 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12741 
   12742 	return rv;
   12743 }
   12744 
   12745 /*
   12746  * Hardware semaphores.
 * Very complex...
   12748  */
   12749 
   12750 static int
   12751 wm_get_null(struct wm_softc *sc)
   12752 {
   12753 
   12754 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12755 		device_xname(sc->sc_dev), __func__));
   12756 	return 0;
   12757 }
   12758 
   12759 static void
   12760 wm_put_null(struct wm_softc *sc)
   12761 {
   12762 
   12763 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12764 		device_xname(sc->sc_dev), __func__));
   12765 	return;
   12766 }
   12767 
   12768 static int
   12769 wm_get_eecd(struct wm_softc *sc)
   12770 {
   12771 	uint32_t reg;
   12772 	int x;
   12773 
   12774 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12775 		device_xname(sc->sc_dev), __func__));
   12776 
   12777 	reg = CSR_READ(sc, WMREG_EECD);
   12778 
   12779 	/* Request EEPROM access. */
   12780 	reg |= EECD_EE_REQ;
   12781 	CSR_WRITE(sc, WMREG_EECD, reg);
   12782 
	/* ...and wait for it to be granted. */
   12784 	for (x = 0; x < 1000; x++) {
   12785 		reg = CSR_READ(sc, WMREG_EECD);
   12786 		if (reg & EECD_EE_GNT)
   12787 			break;
   12788 		delay(5);
   12789 	}
   12790 	if ((reg & EECD_EE_GNT) == 0) {
   12791 		aprint_error_dev(sc->sc_dev,
   12792 		    "could not acquire EEPROM GNT\n");
   12793 		reg &= ~EECD_EE_REQ;
   12794 		CSR_WRITE(sc, WMREG_EECD, reg);
   12795 		return -1;
   12796 	}
   12797 
   12798 	return 0;
   12799 }
   12800 
   12801 static void
   12802 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12803 {
   12804 
   12805 	*eecd |= EECD_SK;
   12806 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12807 	CSR_WRITE_FLUSH(sc);
   12808 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12809 		delay(1);
   12810 	else
   12811 		delay(50);
   12812 }
   12813 
   12814 static void
   12815 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12816 {
   12817 
   12818 	*eecd &= ~EECD_SK;
   12819 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12820 	CSR_WRITE_FLUSH(sc);
   12821 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12822 		delay(1);
   12823 	else
   12824 		delay(50);
   12825 }
   12826 
   12827 static void
   12828 wm_put_eecd(struct wm_softc *sc)
   12829 {
   12830 	uint32_t reg;
   12831 
   12832 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12833 		device_xname(sc->sc_dev), __func__));
   12834 
   12835 	/* Stop nvm */
   12836 	reg = CSR_READ(sc, WMREG_EECD);
   12837 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12838 		/* Pull CS high */
   12839 		reg |= EECD_CS;
   12840 		wm_nvm_eec_clock_lower(sc, &reg);
   12841 	} else {
   12842 		/* CS on Microwire is active-high */
   12843 		reg &= ~(EECD_CS | EECD_DI);
   12844 		CSR_WRITE(sc, WMREG_EECD, reg);
   12845 		wm_nvm_eec_clock_raise(sc, &reg);
   12846 		wm_nvm_eec_clock_lower(sc, &reg);
   12847 	}
   12848 
   12849 	reg = CSR_READ(sc, WMREG_EECD);
   12850 	reg &= ~EECD_EE_REQ;
   12851 	CSR_WRITE(sc, WMREG_EECD, reg);
   12852 
   12853 	return;
   12854 }
   12855 
   12856 /*
   12857  * Get hardware semaphore.
   12858  * Same as e1000_get_hw_semaphore_generic()
   12859  */
   12860 static int
   12861 wm_get_swsm_semaphore(struct wm_softc *sc)
   12862 {
   12863 	int32_t timeout;
   12864 	uint32_t swsm;
   12865 
   12866 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12867 		device_xname(sc->sc_dev), __func__));
   12868 	KASSERT(sc->sc_nvm_wordsize > 0);
   12869 
   12870 retry:
   12871 	/* Get the SW semaphore. */
   12872 	timeout = sc->sc_nvm_wordsize + 1;
   12873 	while (timeout) {
   12874 		swsm = CSR_READ(sc, WMREG_SWSM);
   12875 
   12876 		if ((swsm & SWSM_SMBI) == 0)
   12877 			break;
   12878 
   12879 		delay(50);
   12880 		timeout--;
   12881 	}
   12882 
   12883 	if (timeout == 0) {
   12884 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12885 			/*
   12886 			 * In rare circumstances, the SW semaphore may already
   12887 			 * be held unintentionally. Clear the semaphore once
   12888 			 * before giving up.
   12889 			 */
   12890 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   12891 			wm_put_swsm_semaphore(sc);
   12892 			goto retry;
   12893 		}
   12894 		aprint_error_dev(sc->sc_dev,
   12895 		    "could not acquire SWSM SMBI\n");
   12896 		return 1;
   12897 	}
   12898 
   12899 	/* Get the FW semaphore. */
   12900 	timeout = sc->sc_nvm_wordsize + 1;
   12901 	while (timeout) {
   12902 		swsm = CSR_READ(sc, WMREG_SWSM);
   12903 		swsm |= SWSM_SWESMBI;
   12904 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12905 		/* If we managed to set the bit we got the semaphore. */
   12906 		swsm = CSR_READ(sc, WMREG_SWSM);
   12907 		if (swsm & SWSM_SWESMBI)
   12908 			break;
   12909 
   12910 		delay(50);
   12911 		timeout--;
   12912 	}
   12913 
   12914 	if (timeout == 0) {
   12915 		aprint_error_dev(sc->sc_dev,
   12916 		    "could not acquire SWSM SWESMBI\n");
   12917 		/* Release semaphores */
   12918 		wm_put_swsm_semaphore(sc);
   12919 		return 1;
   12920 	}
   12921 	return 0;
   12922 }
   12923 
   12924 /*
   12925  * Put hardware semaphore.
   12926  * Same as e1000_put_hw_semaphore_generic()
   12927  */
   12928 static void
   12929 wm_put_swsm_semaphore(struct wm_softc *sc)
   12930 {
   12931 	uint32_t swsm;
   12932 
   12933 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12934 		device_xname(sc->sc_dev), __func__));
   12935 
   12936 	swsm = CSR_READ(sc, WMREG_SWSM);
   12937 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12938 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12939 }
   12940 
   12941 /*
   12942  * Get SW/FW semaphore.
   12943  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12944  */
   12945 static int
   12946 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12947 {
   12948 	uint32_t swfw_sync;
   12949 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12950 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12951 	int timeout;
   12952 
   12953 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12954 		device_xname(sc->sc_dev), __func__));
   12955 
   12956 	if (sc->sc_type == WM_T_80003)
   12957 		timeout = 50;
   12958 	else
   12959 		timeout = 200;
   12960 
	while (timeout-- > 0) {
   12962 		if (wm_get_swsm_semaphore(sc)) {
   12963 			aprint_error_dev(sc->sc_dev,
   12964 			    "%s: failed to get semaphore\n",
   12965 			    __func__);
   12966 			return 1;
   12967 		}
   12968 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12969 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12970 			swfw_sync |= swmask;
   12971 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12972 			wm_put_swsm_semaphore(sc);
   12973 			return 0;
   12974 		}
   12975 		wm_put_swsm_semaphore(sc);
   12976 		delay(5000);
   12977 	}
   12978 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12979 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12980 	return 1;
   12981 }
   12982 
   12983 static void
   12984 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12985 {
   12986 	uint32_t swfw_sync;
   12987 
   12988 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12989 		device_xname(sc->sc_dev), __func__));
   12990 
   12991 	while (wm_get_swsm_semaphore(sc) != 0)
   12992 		continue;
   12993 
   12994 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12995 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12996 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12997 
   12998 	wm_put_swsm_semaphore(sc);
   12999 }
   13000 
   13001 static int
   13002 wm_get_nvm_80003(struct wm_softc *sc)
   13003 {
   13004 	int rv;
   13005 
   13006 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13007 		device_xname(sc->sc_dev), __func__));
   13008 
   13009 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13010 		aprint_error_dev(sc->sc_dev,
   13011 		    "%s: failed to get semaphore(SWFW)\n",
   13012 		    __func__);
   13013 		return rv;
   13014 	}
   13015 
   13016 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13017 	    && (rv = wm_get_eecd(sc)) != 0) {
   13018 		aprint_error_dev(sc->sc_dev,
   13019 		    "%s: failed to get semaphore(EECD)\n",
   13020 		    __func__);
   13021 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13022 		return rv;
   13023 	}
   13024 
   13025 	return 0;
   13026 }
   13027 
   13028 static void
   13029 wm_put_nvm_80003(struct wm_softc *sc)
   13030 {
   13031 
   13032 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13033 		device_xname(sc->sc_dev), __func__));
   13034 
   13035 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13036 		wm_put_eecd(sc);
   13037 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13038 }
   13039 
   13040 static int
   13041 wm_get_nvm_82571(struct wm_softc *sc)
   13042 {
   13043 	int rv;
   13044 
   13045 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13046 		device_xname(sc->sc_dev), __func__));
   13047 
   13048 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13049 		return rv;
   13050 
   13051 	switch (sc->sc_type) {
   13052 	case WM_T_82573:
   13053 		break;
   13054 	default:
   13055 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13056 			rv = wm_get_eecd(sc);
   13057 		break;
   13058 	}
   13059 
   13060 	if (rv != 0) {
   13061 		aprint_error_dev(sc->sc_dev,
   13062 		    "%s: failed to get semaphore\n",
   13063 		    __func__);
   13064 		wm_put_swsm_semaphore(sc);
   13065 	}
   13066 
   13067 	return rv;
   13068 }
   13069 
   13070 static void
   13071 wm_put_nvm_82571(struct wm_softc *sc)
   13072 {
   13073 
   13074 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13075 		device_xname(sc->sc_dev), __func__));
   13076 
   13077 	switch (sc->sc_type) {
   13078 	case WM_T_82573:
   13079 		break;
   13080 	default:
   13081 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13082 			wm_put_eecd(sc);
   13083 		break;
   13084 	}
   13085 
   13086 	wm_put_swsm_semaphore(sc);
   13087 }
   13088 
   13089 static int
   13090 wm_get_phy_82575(struct wm_softc *sc)
   13091 {
   13092 
   13093 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13094 		device_xname(sc->sc_dev), __func__));
   13095 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13096 }
   13097 
   13098 static void
   13099 wm_put_phy_82575(struct wm_softc *sc)
   13100 {
   13101 
   13102 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13103 		device_xname(sc->sc_dev), __func__));
    13104 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13105 }
   13106 
   13107 static int
   13108 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13109 {
   13110 	uint32_t ext_ctrl;
    13111 	int timeout;
   13112 
   13113 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13114 		device_xname(sc->sc_dev), __func__));
   13115 
   13116 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13117 	for (timeout = 0; timeout < 200; timeout++) {
   13118 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13119 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13120 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13121 
   13122 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13123 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13124 			return 0;
   13125 		delay(5000);
   13126 	}
   13127 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13128 	    device_xname(sc->sc_dev), ext_ctrl);
   13129 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13130 	return 1;
   13131 }
   13132 
   13133 static void
   13134 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13135 {
   13136 	uint32_t ext_ctrl;
   13137 
   13138 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13139 		device_xname(sc->sc_dev), __func__));
   13140 
   13141 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13142 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13143 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13144 
   13145 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13146 }
   13147 
   13148 static int
   13149 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13150 {
   13151 	uint32_t ext_ctrl;
   13152 	int timeout;
   13153 
   13154 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13155 		device_xname(sc->sc_dev), __func__));
   13156 	mutex_enter(sc->sc_ich_phymtx);
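          	/*
          	 * Two phases: first wait for the current owner (firmware or
          	 * another software agent) to drop MDIO ownership, then set the
          	 * bit ourselves and read it back to confirm we really got it.
          	 */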
   13157 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13158 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13159 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13160 			break;
   13161 		delay(1000);
   13162 	}
   13163 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13164 		printf("%s: SW has already locked the resource\n",
   13165 		    device_xname(sc->sc_dev));
   13166 		goto out;
   13167 	}
   13168 
   13169 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13170 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13171 	for (timeout = 0; timeout < 1000; timeout++) {
   13172 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13173 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13174 			break;
   13175 		delay(1000);
   13176 	}
   13177 	if (timeout >= 1000) {
   13178 		printf("%s: failed to acquire semaphore\n",
   13179 		    device_xname(sc->sc_dev));
   13180 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13181 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13182 		goto out;
   13183 	}
   13184 	return 0;
   13185 
   13186 out:
   13187 	mutex_exit(sc->sc_ich_phymtx);
   13188 	return 1;
   13189 }
   13190 
   13191 static void
   13192 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13193 {
   13194 	uint32_t ext_ctrl;
   13195 
   13196 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13197 		device_xname(sc->sc_dev), __func__));
   13198 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13199 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13200 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13201 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13202 	} else {
   13203 		printf("%s: Semaphore unexpectedly released\n",
   13204 		    device_xname(sc->sc_dev));
   13205 	}
   13206 
   13207 	mutex_exit(sc->sc_ich_phymtx);
   13208 }
   13209 
   13210 static int
   13211 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13212 {
   13213 
   13214 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13215 		device_xname(sc->sc_dev), __func__));
   13216 	mutex_enter(sc->sc_ich_nvmmtx);
   13217 
   13218 	return 0;
   13219 }
   13220 
   13221 static void
   13222 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13223 {
   13224 
   13225 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13226 		device_xname(sc->sc_dev), __func__));
   13227 	mutex_exit(sc->sc_ich_nvmmtx);
   13228 }
   13229 
   13230 static int
   13231 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13232 {
   13233 	int i = 0;
   13234 	uint32_t reg;
   13235 
   13236 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13237 		device_xname(sc->sc_dev), __func__));
   13238 
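          	/*
          	 * There is no SW_FW_SYNC register in this flow; MDIO software
          	 * ownership in EXTCNFCTR serves as the hardware semaphore.
          	 */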
   13239 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13240 	do {
   13241 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13242 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13243 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13244 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13245 			break;
   13246 		delay(2*1000);
   13247 		i++;
   13248 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13249 
   13250 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13251 		wm_put_hw_semaphore_82573(sc);
   13252 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13253 		    device_xname(sc->sc_dev));
   13254 		return -1;
   13255 	}
   13256 
   13257 	return 0;
   13258 }
   13259 
   13260 static void
   13261 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13262 {
   13263 	uint32_t reg;
   13264 
   13265 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13266 		device_xname(sc->sc_dev), __func__));
   13267 
   13268 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13269 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13270 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13271 }
   13272 
   13273 /*
   13274  * Management mode and power management related subroutines.
   13275  * BMC, AMT, suspend/resume and EEE.
   13276  */
   13277 
   13278 #ifdef WM_WOL
   13279 static int
   13280 wm_check_mng_mode(struct wm_softc *sc)
   13281 {
   13282 	int rv;
   13283 
   13284 	switch (sc->sc_type) {
   13285 	case WM_T_ICH8:
   13286 	case WM_T_ICH9:
   13287 	case WM_T_ICH10:
   13288 	case WM_T_PCH:
   13289 	case WM_T_PCH2:
   13290 	case WM_T_PCH_LPT:
   13291 	case WM_T_PCH_SPT:
   13292 		rv = wm_check_mng_mode_ich8lan(sc);
   13293 		break;
   13294 	case WM_T_82574:
   13295 	case WM_T_82583:
   13296 		rv = wm_check_mng_mode_82574(sc);
   13297 		break;
   13298 	case WM_T_82571:
   13299 	case WM_T_82572:
   13300 	case WM_T_82573:
   13301 	case WM_T_80003:
   13302 		rv = wm_check_mng_mode_generic(sc);
   13303 		break;
   13304 	default:
    13305 		/* nothing to do */
   13306 		rv = 0;
   13307 		break;
   13308 	}
   13309 
   13310 	return rv;
   13311 }
   13312 
   13313 static int
   13314 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13315 {
   13316 	uint32_t fwsm;
   13317 
   13318 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13319 
   13320 	if (((fwsm & FWSM_FW_VALID) != 0)
   13321 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13322 		return 1;
   13323 
   13324 	return 0;
   13325 }
   13326 
   13327 static int
   13328 wm_check_mng_mode_82574(struct wm_softc *sc)
   13329 {
   13330 	uint16_t data;
   13331 
   13332 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13333 
   13334 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13335 		return 1;
   13336 
   13337 	return 0;
   13338 }
   13339 
   13340 static int
   13341 wm_check_mng_mode_generic(struct wm_softc *sc)
   13342 {
   13343 	uint32_t fwsm;
   13344 
   13345 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13346 
   13347 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13348 		return 1;
   13349 
   13350 	return 0;
   13351 }
   13352 #endif /* WM_WOL */
   13353 
   13354 static int
   13355 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13356 {
   13357 	uint32_t manc, fwsm, factps;
   13358 
   13359 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13360 		return 0;
   13361 
   13362 	manc = CSR_READ(sc, WMREG_MANC);
   13363 
   13364 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13365 		device_xname(sc->sc_dev), manc));
   13366 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13367 		return 0;
   13368 
   13369 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13370 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13371 		factps = CSR_READ(sc, WMREG_FACTPS);
   13372 		if (((factps & FACTPS_MNGCG) == 0)
   13373 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13374 			return 1;
    13375 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13376 		uint16_t data;
   13377 
   13378 		factps = CSR_READ(sc, WMREG_FACTPS);
   13379 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13380 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13381 			device_xname(sc->sc_dev), factps, data));
   13382 		if (((factps & FACTPS_MNGCG) == 0)
   13383 		    && ((data & NVM_CFG2_MNGM_MASK)
   13384 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13385 			return 1;
   13386 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13387 	    && ((manc & MANC_ASF_EN) == 0))
   13388 		return 1;
   13389 
   13390 	return 0;
   13391 }
   13392 
   13393 static bool
   13394 wm_phy_resetisblocked(struct wm_softc *sc)
   13395 {
   13396 	bool blocked = false;
   13397 	uint32_t reg;
   13398 	int i = 0;
   13399 
   13400 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13401 		device_xname(sc->sc_dev), __func__));
   13402 
   13403 	switch (sc->sc_type) {
   13404 	case WM_T_ICH8:
   13405 	case WM_T_ICH9:
   13406 	case WM_T_ICH10:
   13407 	case WM_T_PCH:
   13408 	case WM_T_PCH2:
   13409 	case WM_T_PCH_LPT:
   13410 	case WM_T_PCH_SPT:
   13411 		do {
   13412 			reg = CSR_READ(sc, WMREG_FWSM);
   13413 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13414 				blocked = true;
   13415 				delay(10*1000);
   13416 				continue;
   13417 			}
   13418 			blocked = false;
   13419 		} while (blocked && (i++ < 30));
    13420 		return blocked;
   13422 	case WM_T_82571:
   13423 	case WM_T_82572:
   13424 	case WM_T_82573:
   13425 	case WM_T_82574:
   13426 	case WM_T_82583:
   13427 	case WM_T_80003:
    13428 		reg = CSR_READ(sc, WMREG_MANC);
    13429 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   13434 	default:
   13435 		/* no problem */
   13436 		break;
   13437 	}
   13438 
   13439 	return false;
   13440 }
   13441 
   13442 static void
   13443 wm_get_hw_control(struct wm_softc *sc)
   13444 {
   13445 	uint32_t reg;
   13446 
   13447 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13448 		device_xname(sc->sc_dev), __func__));
   13449 
   13450 	if (sc->sc_type == WM_T_82573) {
   13451 		reg = CSR_READ(sc, WMREG_SWSM);
   13452 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13453 	} else if (sc->sc_type >= WM_T_82571) {
   13454 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13455 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13456 	}
   13457 }
   13458 
   13459 static void
   13460 wm_release_hw_control(struct wm_softc *sc)
   13461 {
   13462 	uint32_t reg;
   13463 
   13464 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13465 		device_xname(sc->sc_dev), __func__));
   13466 
   13467 	if (sc->sc_type == WM_T_82573) {
   13468 		reg = CSR_READ(sc, WMREG_SWSM);
   13469 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13470 	} else if (sc->sc_type >= WM_T_82571) {
   13471 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13472 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13473 	}
   13474 }
   13475 
   13476 static void
   13477 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13478 {
   13479 	uint32_t reg;
   13480 
   13481 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13482 		device_xname(sc->sc_dev), __func__));
   13483 
   13484 	if (sc->sc_type < WM_T_PCH2)
   13485 		return;
   13486 
   13487 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13488 
   13489 	if (gate)
   13490 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13491 	else
   13492 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13493 
   13494 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13495 }
   13496 
   13497 static void
   13498 wm_smbustopci(struct wm_softc *sc)
   13499 {
   13500 	uint32_t fwsm, reg;
   13501 	int rv = 0;
   13502 
   13503 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13504 		device_xname(sc->sc_dev), __func__));
   13505 
   13506 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13507 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13508 
   13509 	/* Disable ULP */
   13510 	wm_ulp_disable(sc);
   13511 
   13512 	/* Acquire PHY semaphore */
   13513 	sc->phy.acquire(sc);
   13514 
   13515 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13516 	switch (sc->sc_type) {
   13517 	case WM_T_PCH_LPT:
   13518 	case WM_T_PCH_SPT:
   13519 		if (wm_phy_is_accessible_pchlan(sc))
   13520 			break;
   13521 
   13522 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13523 		reg |= CTRL_EXT_FORCE_SMBUS;
   13524 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13525 #if 0
   13526 		/* XXX Isn't this required??? */
   13527 		CSR_WRITE_FLUSH(sc);
   13528 #endif
   13529 		delay(50 * 1000);
   13530 		/* FALLTHROUGH */
   13531 	case WM_T_PCH2:
   13532 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13533 			break;
   13534 		/* FALLTHROUGH */
   13535 	case WM_T_PCH:
    13536 		if ((sc->sc_type == WM_T_PCH)
    13537 		    && ((fwsm & FWSM_FW_VALID) != 0))
    13538 			break;
   13539 
   13540 		if (wm_phy_resetisblocked(sc) == true) {
   13541 			printf("XXX reset is blocked(3)\n");
   13542 			break;
   13543 		}
   13544 
   13545 		wm_toggle_lanphypc_pch_lpt(sc);
   13546 
   13547 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13548 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13549 				break;
   13550 
   13551 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13552 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13553 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13554 
   13555 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13556 				break;
   13557 			rv = -1;
   13558 		}
   13559 		break;
   13560 	default:
   13561 		break;
   13562 	}
   13563 
   13564 	/* Release semaphore */
   13565 	sc->phy.release(sc);
   13566 
   13567 	if (rv == 0) {
   13568 		if (wm_phy_resetisblocked(sc)) {
   13569 			printf("XXX reset is blocked(4)\n");
   13570 			goto out;
   13571 		}
   13572 		wm_reset_phy(sc);
   13573 		if (wm_phy_resetisblocked(sc))
    13574 			printf("XXX reset is blocked(5)\n");
   13575 	}
   13576 
   13577 out:
   13578 	/*
   13579 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13580 	 */
   13581 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13582 		delay(10*1000);
   13583 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13584 	}
   13585 }
   13586 
   13587 static void
   13588 wm_init_manageability(struct wm_softc *sc)
   13589 {
   13590 
   13591 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13592 		device_xname(sc->sc_dev), __func__));
   13593 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13594 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13595 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13596 
   13597 		/* Disable hardware interception of ARP */
   13598 		manc &= ~MANC_ARP_EN;
   13599 
   13600 		/* Enable receiving management packets to the host */
   13601 		if (sc->sc_type >= WM_T_82571) {
   13602 			manc |= MANC_EN_MNG2HOST;
    13603 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13604 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13605 		}
   13606 
   13607 		CSR_WRITE(sc, WMREG_MANC, manc);
   13608 	}
   13609 }
   13610 
   13611 static void
   13612 wm_release_manageability(struct wm_softc *sc)
   13613 {
   13614 
   13615 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13616 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13617 
   13618 		manc |= MANC_ARP_EN;
   13619 		if (sc->sc_type >= WM_T_82571)
   13620 			manc &= ~MANC_EN_MNG2HOST;
   13621 
   13622 		CSR_WRITE(sc, WMREG_MANC, manc);
   13623 	}
   13624 }
   13625 
   13626 static void
   13627 wm_get_wakeup(struct wm_softc *sc)
   13628 {
   13629 
   13630 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13631 	switch (sc->sc_type) {
   13632 	case WM_T_82573:
   13633 	case WM_T_82583:
   13634 		sc->sc_flags |= WM_F_HAS_AMT;
   13635 		/* FALLTHROUGH */
   13636 	case WM_T_80003:
   13637 	case WM_T_82575:
   13638 	case WM_T_82576:
   13639 	case WM_T_82580:
   13640 	case WM_T_I350:
   13641 	case WM_T_I354:
   13642 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13643 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13644 		/* FALLTHROUGH */
   13645 	case WM_T_82541:
   13646 	case WM_T_82541_2:
   13647 	case WM_T_82547:
   13648 	case WM_T_82547_2:
   13649 	case WM_T_82571:
   13650 	case WM_T_82572:
   13651 	case WM_T_82574:
   13652 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13653 		break;
   13654 	case WM_T_ICH8:
   13655 	case WM_T_ICH9:
   13656 	case WM_T_ICH10:
   13657 	case WM_T_PCH:
   13658 	case WM_T_PCH2:
   13659 	case WM_T_PCH_LPT:
   13660 	case WM_T_PCH_SPT:
   13661 		sc->sc_flags |= WM_F_HAS_AMT;
   13662 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13663 		break;
   13664 	default:
   13665 		break;
   13666 	}
   13667 
   13668 	/* 1: HAS_MANAGE */
   13669 	if (wm_enable_mng_pass_thru(sc) != 0)
   13670 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13671 
    13672 	/*
    13673 	 * Note that the WOL flags are set after the EEPROM settings have
    13674 	 * been reset.
    13675 	 */
   13676 }
   13677 
    13678 /*
    13679  * Unconfigure Ultra Low Power mode.
    13680  * Only for PCH_LPT and newer; some I217/I218 variants are excluded below.
    13681  */
   13682 static void
   13683 wm_ulp_disable(struct wm_softc *sc)
   13684 {
   13685 	uint32_t reg;
   13686 	int i = 0;
   13687 
   13688 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13689 		device_xname(sc->sc_dev), __func__));
   13690 	/* Exclude old devices */
   13691 	if ((sc->sc_type < WM_T_PCH_LPT)
   13692 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13693 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13694 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13695 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13696 		return;
   13697 
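          	/*
          	 * If ME firmware is active it owns the PHY: ask it to leave ULP
          	 * through H2ME and just poll for completion.  Otherwise the
          	 * host has to undo the SMBus/ULP configuration by hand below.
          	 */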
   13698 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13699 		/* Request ME un-configure ULP mode in the PHY */
   13700 		reg = CSR_READ(sc, WMREG_H2ME);
   13701 		reg &= ~H2ME_ULP;
   13702 		reg |= H2ME_ENFORCE_SETTINGS;
   13703 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13704 
   13705 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13706 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13707 			if (i++ == 30) {
   13708 				printf("%s timed out\n", __func__);
   13709 				return;
   13710 			}
   13711 			delay(10 * 1000);
   13712 		}
   13713 		reg = CSR_READ(sc, WMREG_H2ME);
   13714 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13715 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13716 
   13717 		return;
   13718 	}
   13719 
   13720 	/* Acquire semaphore */
   13721 	sc->phy.acquire(sc);
   13722 
   13723 	/* Toggle LANPHYPC */
   13724 	wm_toggle_lanphypc_pch_lpt(sc);
   13725 
   13726 	/* Unforce SMBus mode in PHY */
   13727 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13728 	if (reg == 0x0000 || reg == 0xffff) {
   13729 		uint32_t reg2;
   13730 
   13731 		printf("%s: Force SMBus first.\n", __func__);
   13732 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13733 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13734 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13735 		delay(50 * 1000);
   13736 
   13737 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13738 	}
   13739 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13740 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13741 
   13742 	/* Unforce SMBus mode in MAC */
   13743 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13744 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13745 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13746 
   13747 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13748 	reg |= HV_PM_CTRL_K1_ENA;
   13749 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13750 
   13751 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13752 	reg &= ~(I218_ULP_CONFIG1_IND
   13753 	    | I218_ULP_CONFIG1_STICKY_ULP
   13754 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13755 	    | I218_ULP_CONFIG1_WOL_HOST
   13756 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13757 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13758 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13759 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13760 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13761 	reg |= I218_ULP_CONFIG1_START;
   13762 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13763 
   13764 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13765 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13766 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13767 
   13768 	/* Release semaphore */
   13769 	sc->phy.release(sc);
   13770 	wm_gmii_reset(sc);
   13771 	delay(50 * 1000);
   13772 }
   13773 
   13774 /* WOL in the newer chipset interfaces (pchlan) */
   13775 static void
   13776 wm_enable_phy_wakeup(struct wm_softc *sc)
   13777 {
   13778 #if 0
   13779 	uint16_t preg;
   13780 
   13781 	/* Copy MAC RARs to PHY RARs */
   13782 
   13783 	/* Copy MAC MTA to PHY MTA */
   13784 
   13785 	/* Configure PHY Rx Control register */
   13786 
   13787 	/* Enable PHY wakeup in MAC register */
   13788 
   13789 	/* Configure and enable PHY wakeup in PHY registers */
   13790 
   13791 	/* Activate PHY wakeup */
   13792 
   13793 	/* XXX */
   13794 #endif
   13795 }
   13796 
   13797 /* Power down workaround on D3 */
   13798 static void
   13799 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13800 {
   13801 	uint32_t reg;
   13802 	int i;
   13803 
   13804 	for (i = 0; i < 2; i++) {
   13805 		/* Disable link */
   13806 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13807 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13808 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13809 
   13810 		/*
   13811 		 * Call gig speed drop workaround on Gig disable before
   13812 		 * accessing any PHY registers
   13813 		 */
   13814 		if (sc->sc_type == WM_T_ICH8)
   13815 			wm_gig_downshift_workaround_ich8lan(sc);
   13816 
   13817 		/* Write VR power-down enable */
   13818 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13819 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13820 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13821 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13822 
   13823 		/* Read it back and test */
   13824 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13825 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13826 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13827 			break;
   13828 
   13829 		/* Issue PHY reset and repeat at most one more time */
   13830 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13831 	}
   13832 }
   13833 
   13834 static void
   13835 wm_enable_wakeup(struct wm_softc *sc)
   13836 {
   13837 	uint32_t reg, pmreg;
   13838 	pcireg_t pmode;
   13839 
   13840 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13841 		device_xname(sc->sc_dev), __func__));
   13842 
   13843 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13844 		&pmreg, NULL) == 0)
   13845 		return;
   13846 
   13847 	/* Advertise the wakeup capability */
   13848 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13849 	    | CTRL_SWDPIN(3));
   13850 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13851 
   13852 	/* ICH workaround */
   13853 	switch (sc->sc_type) {
   13854 	case WM_T_ICH8:
   13855 	case WM_T_ICH9:
   13856 	case WM_T_ICH10:
   13857 	case WM_T_PCH:
   13858 	case WM_T_PCH2:
   13859 	case WM_T_PCH_LPT:
   13860 	case WM_T_PCH_SPT:
   13861 		/* Disable gig during WOL */
   13862 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13863 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13864 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13865 		if (sc->sc_type == WM_T_PCH)
   13866 			wm_gmii_reset(sc);
   13867 
   13868 		/* Power down workaround */
   13869 		if (sc->sc_phytype == WMPHY_82577) {
   13870 			struct mii_softc *child;
   13871 
   13872 			/* Assume that the PHY is copper */
   13873 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13874 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13875 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13876 				    (768 << 5) | 25, 0x0444); /* magic num */
   13877 		}
   13878 		break;
   13879 	default:
   13880 		break;
   13881 	}
   13882 
   13883 	/* Keep the laser running on fiber adapters */
   13884 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13885 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13886 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13887 		reg |= CTRL_EXT_SWDPIN(3);
   13888 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13889 	}
   13890 
   13891 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13892 #if 0	/* for the multicast packet */
   13893 	reg |= WUFC_MC;
   13894 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13895 #endif
   13896 
   13897 	if (sc->sc_type >= WM_T_PCH)
   13898 		wm_enable_phy_wakeup(sc);
   13899 	else {
   13900 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13901 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13902 	}
   13903 
   13904 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13905 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13906 		|| (sc->sc_type == WM_T_PCH2))
   13907 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13908 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13909 
   13910 	/* Request PME */
   13911 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13912 #if 0
   13913 	/* Disable WOL */
   13914 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13915 #else
   13916 	/* For WOL */
   13917 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13918 #endif
   13919 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13920 }
   13921 
   13922 /* Disable ASPM L0s and/or L1 for workaround */
   13923 static void
   13924 wm_disable_aspm(struct wm_softc *sc)
   13925 {
   13926 	pcireg_t reg, mask = 0;
    13927 	const char *str = "";
   13928 
    13929 	/*
    13930 	 * Only for PCIe devices which have the PCIe capability structure
    13931 	 * in their PCI config space.
    13932 	 */
   13933 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   13934 		return;
   13935 
   13936 	switch (sc->sc_type) {
   13937 	case WM_T_82571:
   13938 	case WM_T_82572:
   13939 		/*
   13940 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   13941 		 * State Power management L1 State (ASPM L1).
   13942 		 */
   13943 		mask = PCIE_LCSR_ASPM_L1;
   13944 		str = "L1 is";
   13945 		break;
   13946 	case WM_T_82573:
   13947 	case WM_T_82574:
   13948 	case WM_T_82583:
   13949 		/*
    13950 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
    13951 		 *
    13952 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    13953 		 * some chipsets.  The 82574 and 82583 documents say that
    13954 		 * disabling L0s on those specific chipsets is sufficient,
    13955 		 * but we follow what the Intel em driver does.
   13956 		 *
   13957 		 * References:
   13958 		 * Errata 8 of the Specification Update of i82573.
   13959 		 * Errata 20 of the Specification Update of i82574.
   13960 		 * Errata 9 of the Specification Update of i82583.
   13961 		 */
   13962 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   13963 		str = "L0s and L1 are";
   13964 		break;
   13965 	default:
   13966 		return;
   13967 	}
   13968 
   13969 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13970 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   13971 	reg &= ~mask;
   13972 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13973 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   13974 
   13975 	/* Print only in wm_attach() */
   13976 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   13977 		aprint_verbose_dev(sc->sc_dev,
   13978 		    "ASPM %s disabled to workaround the errata.\n",
   13979 			str);
   13980 }
   13981 
   13982 /* LPLU */
   13983 
   13984 static void
   13985 wm_lplu_d0_disable(struct wm_softc *sc)
   13986 {
   13987 	struct mii_data *mii = &sc->sc_mii;
   13988 	uint32_t reg;
   13989 
   13990 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13991 		device_xname(sc->sc_dev), __func__));
   13992 
   13993 	if (sc->sc_phytype == WMPHY_IFE)
   13994 		return;
   13995 
   13996 	switch (sc->sc_type) {
   13997 	case WM_T_82571:
   13998 	case WM_T_82572:
   13999 	case WM_T_82573:
   14000 	case WM_T_82575:
   14001 	case WM_T_82576:
   14002 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14003 		reg &= ~PMR_D0_LPLU;
   14004 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14005 		break;
   14006 	case WM_T_82580:
   14007 	case WM_T_I350:
   14008 	case WM_T_I210:
   14009 	case WM_T_I211:
   14010 		reg = CSR_READ(sc, WMREG_PHPM);
   14011 		reg &= ~PHPM_D0A_LPLU;
   14012 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14013 		break;
   14014 	case WM_T_82574:
   14015 	case WM_T_82583:
   14016 	case WM_T_ICH8:
   14017 	case WM_T_ICH9:
   14018 	case WM_T_ICH10:
   14019 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14020 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14021 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14022 		CSR_WRITE_FLUSH(sc);
   14023 		break;
   14024 	case WM_T_PCH:
   14025 	case WM_T_PCH2:
   14026 	case WM_T_PCH_LPT:
   14027 	case WM_T_PCH_SPT:
   14028 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14029 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14030 		if (wm_phy_resetisblocked(sc) == false)
   14031 			reg |= HV_OEM_BITS_ANEGNOW;
   14032 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14033 		break;
   14034 	default:
   14035 		break;
   14036 	}
   14037 }
   14038 
   14039 /* EEE */
   14040 
   14041 static void
   14042 wm_set_eee_i350(struct wm_softc *sc)
   14043 {
   14044 	uint32_t ipcnfg, eeer;
   14045 
   14046 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14047 	eeer = CSR_READ(sc, WMREG_EEER);
   14048 
   14049 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14050 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14051 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14052 		    | EEER_LPI_FC);
   14053 	} else {
   14054 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14055 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14056 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14057 		    | EEER_LPI_FC);
   14058 	}
   14059 
   14060 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14061 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14062 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14063 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14064 }
   14065 
   14066 /*
   14067  * Workarounds (mainly PHY related).
   14068  * Basically, PHY's workarounds are in the PHY drivers.
   14069  */
   14070 
   14071 /* Work-around for 82566 Kumeran PCS lock loss */
   14072 static void
   14073 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14074 {
   14075 	struct mii_data *mii = &sc->sc_mii;
   14076 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14077 	int i;
   14078 	int reg;
   14079 
   14080 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14081 		device_xname(sc->sc_dev), __func__));
   14082 
   14083 	/* If the link is not up, do nothing */
   14084 	if ((status & STATUS_LU) == 0)
   14085 		return;
   14086 
    14087 	/* Nothing to do if the link speed is other than 1Gbps */
   14088 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14089 		return;
   14090 
   14092 	for (i = 0; i < 10; i++) {
   14093 		/* read twice */
   14094 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14095 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14096 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14097 			goto out;	/* GOOD! */
   14098 
   14099 		/* Reset the PHY */
   14100 		wm_reset_phy(sc);
   14101 		delay(5*1000);
   14102 	}
   14103 
   14104 	/* Disable GigE link negotiation */
   14105 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14106 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14107 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14108 
   14109 	/*
   14110 	 * Call gig speed drop workaround on Gig disable before accessing
   14111 	 * any PHY registers.
   14112 	 */
   14113 	wm_gig_downshift_workaround_ich8lan(sc);
   14114 
   14115 out:
   14116 	return;
   14117 }
   14118 
   14119 /* WOL from S5 stops working */
   14120 static void
   14121 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14122 {
   14123 	uint16_t kmreg;
   14124 
   14125 	/* Only for igp3 */
   14126 	if (sc->sc_phytype == WMPHY_IGP_3) {
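          		/*
          		 * Toggling the Kumeran near-end loopback bit (set, then
          		 * clear NELPBK) after a gigabit downshift keeps WoL from
          		 * S5 working; the sequence mirrors e1000's gig downshift
          		 * workaround.
          		 */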
   14127 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14128 			return;
   14129 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14130 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14131 			return;
   14132 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14133 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14134 	}
   14135 }
   14136 
   14137 /*
   14138  * Workaround for pch's PHYs
   14139  * XXX should be moved to new PHY driver?
   14140  */
   14141 static void
   14142 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14143 {
   14144 
   14145 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14146 		device_xname(sc->sc_dev), __func__));
   14147 	KASSERT(sc->sc_type == WM_T_PCH);
   14148 
   14149 	if (sc->sc_phytype == WMPHY_82577)
   14150 		wm_set_mdio_slow_mode_hv(sc);
   14151 
   14152 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14153 
   14154 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14155 
   14156 	/* 82578 */
   14157 	if (sc->sc_phytype == WMPHY_82578) {
   14158 		struct mii_softc *child;
   14159 
   14160 		/*
   14161 		 * Return registers to default by doing a soft reset then
   14162 		 * writing 0x3140 to the control register
   14163 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14164 		 */
   14165 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14166 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14167 			PHY_RESET(child);
   14168 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14169 			    0x3140);
   14170 		}
   14171 	}
   14172 
   14173 	/* Select page 0 */
   14174 	sc->phy.acquire(sc);
   14175 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14176 	sc->phy.release(sc);
   14177 
   14178 	/*
   14179 	 * Configure the K1 Si workaround during phy reset assuming there is
   14180 	 * link so that it disables K1 if link is in 1Gbps.
   14181 	 */
   14182 	wm_k1_gig_workaround_hv(sc, 1);
   14183 }
   14184 
   14185 static void
   14186 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14187 {
   14188 
   14189 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14190 		device_xname(sc->sc_dev), __func__));
   14191 	KASSERT(sc->sc_type == WM_T_PCH2);
   14192 
   14193 	wm_set_mdio_slow_mode_hv(sc);
   14194 }
   14195 
   14196 static int
   14197 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14198 {
   14199 	int k1_enable = sc->sc_nvm_k1_enabled;
   14200 
   14201 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14202 		device_xname(sc->sc_dev), __func__));
   14203 
   14204 	if (sc->phy.acquire(sc) != 0)
   14205 		return -1;
   14206 
   14207 	if (link) {
   14208 		k1_enable = 0;
   14209 
   14210 		/* Link stall fix for link up */
   14211 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   14212 	} else {
   14213 		/* Link stall fix for link down */
   14214 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   14215 	}
   14216 
   14217 	wm_configure_k1_ich8lan(sc, k1_enable);
   14218 	sc->phy.release(sc);
   14219 
   14220 	return 0;
   14221 }
   14222 
   14223 static void
   14224 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14225 {
   14226 	uint32_t reg;
   14227 
   14228 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14229 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14230 	    reg | HV_KMRN_MDIO_SLOW);
   14231 }
   14232 
   14233 static void
   14234 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14235 {
   14236 	uint32_t ctrl, ctrl_ext, tmp;
   14237 	uint16_t kmreg;
   14238 	int rv;
   14239 
   14240 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14241 	if (rv != 0)
   14242 		return;
   14243 
   14244 	if (k1_enable)
   14245 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14246 	else
   14247 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14248 
   14249 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14250 	if (rv != 0)
   14251 		return;
   14252 
   14253 	delay(20);
   14254 
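          	/*
          	 * Briefly force the MAC speed with SPD_BYPS so the Kumeran
          	 * interface latches the new K1 setting, then restore the
          	 * original CTRL/CTRL_EXT values (the same dance as e1000's
          	 * e1000_configure_k1_ich8lan()).
          	 */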
   14255 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14256 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14257 
   14258 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14259 	tmp |= CTRL_FRCSPD;
   14260 
   14261 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14262 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14263 	CSR_WRITE_FLUSH(sc);
   14264 	delay(20);
   14265 
   14266 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14267 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14268 	CSR_WRITE_FLUSH(sc);
   14269 	delay(20);
   14272 }
   14273 
    14274 /* Special case - the 82575 needs to do manual init ... */
   14275 static void
   14276 wm_reset_init_script_82575(struct wm_softc *sc)
   14277 {
    14278 	/*
    14279 	 * Remark: this is untested code - we have no board without EEPROM.
    14280 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    14281 	 */
   14282 
   14283 	/* SerDes configuration via SERDESCTRL */
   14284 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14285 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14286 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14287 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14288 
   14289 	/* CCM configuration via CCMCTL register */
   14290 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14291 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14292 
   14293 	/* PCIe lanes configuration */
   14294 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14295 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14296 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14297 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14298 
   14299 	/* PCIe PLL Configuration */
   14300 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14301 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14302 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14303 }
   14304 
   14305 static void
   14306 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14307 {
   14308 	uint32_t reg;
   14309 	uint16_t nvmword;
   14310 	int rv;
   14311 
   14312 	if (sc->sc_type != WM_T_82580)
   14313 		return;
   14314 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14315 		return;
   14316 
   14317 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14318 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14319 	if (rv != 0) {
   14320 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14321 		    __func__);
   14322 		return;
   14323 	}
   14324 
   14325 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14326 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14327 		reg |= MDICNFG_DEST;
   14328 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14329 		reg |= MDICNFG_COM_MDIO;
   14330 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14331 }
   14332 
   14333 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14334 
   14335 static bool
   14336 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14337 {
   14338 	int i;
   14339 	uint32_t reg;
   14340 	uint16_t id1, id2;
   14341 
   14342 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14343 		device_xname(sc->sc_dev), __func__));
   14344 	id1 = id2 = 0xffff;
   14345 	for (i = 0; i < 2; i++) {
   14346 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14347 		if (MII_INVALIDID(id1))
   14348 			continue;
   14349 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14350 		if (MII_INVALIDID(id2))
   14351 			continue;
   14352 		break;
   14353 	}
   14354 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   14355 		goto out;
   14356 	}
   14357 
   14358 	if (sc->sc_type < WM_T_PCH_LPT) {
   14359 		sc->phy.release(sc);
   14360 		wm_set_mdio_slow_mode_hv(sc);
   14361 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14362 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14363 		sc->phy.acquire(sc);
   14364 	}
   14365 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14366 		printf("XXX return with false\n");
   14367 		return false;
   14368 	}
   14369 out:
   14370 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14371 		/* Only unforce SMBus if ME is not active */
   14372 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14373 			/* Unforce SMBus mode in PHY */
   14374 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14375 			    CV_SMB_CTRL);
   14376 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14377 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14378 			    CV_SMB_CTRL, reg);
   14379 
   14380 			/* Unforce SMBus mode in MAC */
   14381 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14382 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14383 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14384 		}
   14385 	}
   14386 	return true;
   14387 }
   14388 
   14389 static void
   14390 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14391 {
   14392 	uint32_t reg;
   14393 	int i;
   14394 
   14395 	/* Set PHY Config Counter to 50msec */
   14396 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14397 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14398 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14399 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14400 
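          	/*
          	 * Driving LANPHYPC low through the override bit power-cycles
          	 * the PHY; clearing the override afterwards hands the pin back
          	 * to the hardware.  The waits below give the PHY time to come
          	 * back up.
          	 */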
   14401 	/* Toggle LANPHYPC */
   14402 	reg = CSR_READ(sc, WMREG_CTRL);
   14403 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14404 	reg &= ~CTRL_LANPHYPC_VALUE;
   14405 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14406 	CSR_WRITE_FLUSH(sc);
   14407 	delay(1000);
   14408 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14409 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14410 	CSR_WRITE_FLUSH(sc);
   14411 
   14412 	if (sc->sc_type < WM_T_PCH_LPT)
   14413 		delay(50 * 1000);
   14414 	else {
   14415 		i = 20;
   14416 
   14417 		do {
   14418 			delay(5 * 1000);
   14419 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14420 		    && i--);
   14421 
   14422 		delay(30 * 1000);
   14423 	}
   14424 }
   14425 
   14426 static int
   14427 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14428 {
   14429 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14430 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14431 	uint32_t rxa;
   14432 	uint16_t scale = 0, lat_enc = 0;
   14433 	int32_t obff_hwm = 0;
   14434 	int64_t lat_ns, value;
   14435 
   14436 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14437 		device_xname(sc->sc_dev), __func__));
   14438 
   14439 	if (link) {
   14440 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14441 		uint32_t status;
   14442 		uint16_t speed;
   14443 		pcireg_t preg;
   14444 
   14445 		status = CSR_READ(sc, WMREG_STATUS);
   14446 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14447 		case STATUS_SPEED_10:
   14448 			speed = 10;
   14449 			break;
   14450 		case STATUS_SPEED_100:
   14451 			speed = 100;
   14452 			break;
   14453 		case STATUS_SPEED_1000:
   14454 			speed = 1000;
   14455 			break;
   14456 		default:
   14457 			device_printf(sc->sc_dev, "Unknown speed "
   14458 			    "(status = %08x)\n", status);
   14459 			return -1;
   14460 		}
   14461 
   14462 		/* Rx Packet Buffer Allocation size (KB) */
   14463 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14464 
   14465 		/*
   14466 		 * Determine the maximum latency tolerated by the device.
   14467 		 *
   14468 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14469 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14470 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14471 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14472 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14473 		 */
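          		/*
          		 * Worked example: lat_ns = 100000 encodes as value 98
          		 * with scale 2, since howmany(howmany(100000, 32), 32)
          		 * == 98 and 98 * 2^10 ns ~= 100.4 us.
          		 */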
   14474 		lat_ns = ((int64_t)rxa * 1024 -
   14475 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14476 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14477 		if (lat_ns < 0)
   14478 			lat_ns = 0;
   14479 		else
   14480 			lat_ns /= speed;
   14481 		value = lat_ns;
   14482 
   14483 		while (value > LTRV_VALUE) {
    14484 			scale++;
   14485 			value = howmany(value, __BIT(5));
   14486 		}
   14487 		if (scale > LTRV_SCALE_MAX) {
   14488 			printf("%s: Invalid LTR latency scale %d\n",
   14489 			    device_xname(sc->sc_dev), scale);
   14490 			return -1;
   14491 		}
   14492 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14493 
   14494 		/* Determine the maximum latency tolerated by the platform */
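          		/*
          		 * The low 16 bits of the LTR capability register hold
          		 * the max snoop latency and the high 16 bits the max
          		 * no-snoop latency, both in the same encoding as lat_enc
          		 * above.
          		 */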
   14495 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14496 		    WM_PCI_LTR_CAP_LPT);
   14497 		max_snoop = preg & 0xffff;
   14498 		max_nosnoop = preg >> 16;
   14499 
   14500 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14501 
   14502 		if (lat_enc > max_ltr_enc) {
   14503 			lat_enc = max_ltr_enc;
   14504 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14505 			    * PCI_LTR_SCALETONS(
   14506 				    __SHIFTOUT(lat_enc,
   14507 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14508 		}
   14509 
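          		/*
          		 * Convert the (possibly clamped) latency back into the
          		 * amount of Rx buffer (in KB) consumed while the platform
          		 * is latent; what remains is the OBFF high water mark.
          		 */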
   14510 		if (lat_ns) {
   14511 			lat_ns *= speed * 1000;
   14512 			lat_ns /= 8;
   14513 			lat_ns /= 1000000000;
   14514 			obff_hwm = (int32_t)(rxa - lat_ns);
   14515 		}
   14516 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    14517 			device_printf(sc->sc_dev, "Invalid high water mark %d "
    14518 			    "(rxa = %d, lat_ns = %d)\n",
   14519 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14520 			return -1;
   14521 		}
   14522 	}
   14523 	/* Snoop and No-Snoop latencies the same */
   14524 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14525 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14526 
   14527 	/* Set OBFF high water mark */
   14528 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14529 	reg |= obff_hwm;
   14530 	CSR_WRITE(sc, WMREG_SVT, reg);
   14531 
   14532 	/* Enable OBFF */
   14533 	reg = CSR_READ(sc, WMREG_SVCR);
   14534 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14535 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14536 
   14537 	return 0;
   14538 }
   14539 
   14540 /*
   14541  * I210 Errata 25 and I211 Errata 10
   14542  * Slow System Clock.
   14543  */
   14544 static void
   14545 wm_pll_workaround_i210(struct wm_softc *sc)
   14546 {
   14547 	uint32_t mdicnfg, wuc;
   14548 	uint32_t reg;
   14549 	pcireg_t pcireg;
   14550 	uint32_t pmreg;
   14551 	uint16_t nvmword, tmp_nvmword;
   14552 	int phyval;
   14553 	bool wa_done = false;
   14554 	int i;
   14555 
   14556 	/* Save WUC and MDICNFG registers */
   14557 	wuc = CSR_READ(sc, WMREG_WUC);
   14558 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14559 
   14560 	reg = mdicnfg & ~MDICNFG_DEST;
   14561 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14562 
   14563 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14564 		nvmword = INVM_DEFAULT_AL;
   14565 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14566 
    14567 	/* Get Power Management cap offset */
    14568 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
    14569 		&pmreg, NULL) == 0) {
    14570 		/* Restore MDICNFG before bailing out. */
          		CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
          		return;
          	}
   14571 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14572 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14573 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14574 
   14575 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14576 			break; /* OK */
   14577 		}
   14578 
   14579 		wa_done = true;
   14580 		/* Directly reset the internal PHY */
   14581 		reg = CSR_READ(sc, WMREG_CTRL);
   14582 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14583 
   14584 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14585 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14586 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14587 
   14588 		CSR_WRITE(sc, WMREG_WUC, 0);
   14589 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14590 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14591 
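          		/*
          		 * Bounce the function through D3hot and back to D0 so
          		 * the iNVM autoload runs again with the PLL workaround
          		 * value set above.
          		 */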
   14592 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14593 		    pmreg + PCI_PMCSR);
   14594 		pcireg |= PCI_PMCSR_STATE_D3;
   14595 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14596 		    pmreg + PCI_PMCSR, pcireg);
   14597 		delay(1000);
   14598 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14599 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14600 		    pmreg + PCI_PMCSR, pcireg);
   14601 
   14602 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14603 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14604 
   14605 		/* Restore WUC register */
   14606 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14607 	}
   14608 
   14609 	/* Restore MDICNFG setting */
   14610 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14611 	if (wa_done)
   14612 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14613 }
   14614 
   14615 static void
   14616 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14617 {
   14618 	uint32_t reg;
   14619 
   14620 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14621 		device_xname(sc->sc_dev), __func__));
   14622 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14623 
   14624 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14625 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14626 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14627 
   14628 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14629 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14630 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14631 }
   14632