/*	$NetBSD: if_wm.c,v 1.580 2018/05/25 04:40:26 ozaki-r Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.580 2018/05/25 04:40:26 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
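
/*
 * Typical (hypothetical) usage of the debug macro above: the extra
 * parentheses let DPRINTF() forward a complete printf() argument list,
 * e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */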

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * Maximum number of interrupts this driver can use: up to 16 per-queue
 * interrupts plus one link interrupt.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
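
/*
 * Because both ring sizes are powers of two, the WM_NEXTTX()/WM_NEXTTXS()
 * macros above wrap the ring index with a cheap AND mask instead of a
 * modulo.  A minimal sketch (illustrative only, not part of the driver):
 */
#if 0
static inline int
wm_nexttx_example(struct wm_txqueue *txq, int x)
{

	/* Equivalent to (x + 1) % WM_NTXDESC(txq); e.g. 255 -> 0 for 256. */
	return (x + 1) & WM_NTXDESC_MASK(txq);
}
#endif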

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
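
/*
 * Note that WM_PREVRX() relies on the same power-of-two mask to wrap
 * backwards: with WM_NRXDESC == 256, (0 - 1) & 255 == 255.
 */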

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;
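
/*
 * A minimal sketch (an assumption based on the union members' comments
 * above, not the driver's actual code) of how the per-chip Rx descriptor
 * size could be selected:
 */
#if 0
static size_t
wm_rxdesc_size_example(wm_chip_type type)
{

	if (type == WM_T_82574)
		return sizeof(ext_rxdesc_t);	/* 82574 only */
	if (type >= WM_T_82575)
		return sizeof(nq_rxdesc_t);	/* 82575 and newer */
	return sizeof(wiseman_rxdesc_t);	/* legacy layout */
}
#endif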

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
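
/*
 * Judging by its name, the table above is consumed by
 * wm_rxpbs_adjust_82580() (declared below), presumably indexed by the
 * RXPBS register field to yield a usable packet buffer size.
 */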

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
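
/*
 * Illustrative expansion (not compiled): for qname == txq, evname == txdw
 * and qnum == 0, WM_Q_EVCNT_DEFINE() declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH() formats the name "txq00txdw" into that buffer.
 * Note that macro parameters are not expanded inside string literals, so
 * the array size is always sizeof("qname##XX##evname") == 18, a fixed
 * upper bound rather than a per-name size.
 */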

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
	WM_Q_EVCNT_DEFINE(rxq, rxdefer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
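
/*
 * A minimal usage sketch (assumption): the macros above make core locking
 * a no-op while sc_core_lock is still NULL, so the same pattern works
 * both before and after the mutex is created:
 */
#if 0
	WM_CORE_LOCK(sc);
	/* ... modify softc state ... */
	KASSERT(WM_CORE_LOCKED(sc));
	WM_CORE_UNLOCK(sc);
#endif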

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
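
/*
 * Illustrative sketch (not compiled): rxq_tailp always points at the
 * pointer cell to fill next (initially &rxq_head, afterwards the m_next
 * of the last mbuf), so appending to the Rx chain is O(1):
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1, rxq_tail = m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2, rxq_tail = m2 */
#endif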

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
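
/*
 * Worked example: with a 64-bit bus_addr_t, a descriptor base address of
 * 0x123456789 splits into WM_CDTXADDR_LO() == 0x23456789 and
 * WM_CDTXADDR_HI() == 0x1 for the chip's 32-bit base address register
 * pair; when bus_addr_t is 32 bits wide the high half is always 0.
 */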

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
   1335 	  WM_T_82576,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1337 	  "82576 1000BaseX Ethernet",
   1338 	  WM_T_82576,		WMP_F_FIBER },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1341 	  "82576 gigabit Ethernet (SERDES)",
   1342 	  WM_T_82576,		WMP_F_SERDES },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1345 	  "82576 quad-1000BaseT Ethernet",
   1346 	  WM_T_82576,		WMP_F_COPPER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1349 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1350 	  WM_T_82576,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1353 	  "82576 gigabit Ethernet",
   1354 	  WM_T_82576,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1357 	  "82576 gigabit Ethernet (SERDES)",
   1358 	  WM_T_82576,		WMP_F_SERDES },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1360 	  "82576 quad-gigabit Ethernet (SERDES)",
   1361 	  WM_T_82576,		WMP_F_SERDES },
   1362 
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1364 	  "82580 1000BaseT Ethernet",
   1365 	  WM_T_82580,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1367 	  "82580 1000BaseX Ethernet",
   1368 	  WM_T_82580,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1371 	  "82580 1000BaseT Ethernet (SERDES)",
   1372 	  WM_T_82580,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1375 	  "82580 gigabit Ethernet (SGMII)",
   1376 	  WM_T_82580,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1378 	  "82580 dual-1000BaseT Ethernet",
   1379 	  WM_T_82580,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1382 	  "82580 quad-1000BaseX Ethernet",
   1383 	  WM_T_82580,		WMP_F_FIBER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1386 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1387 	  WM_T_82580,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1390 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1391 	  WM_T_82580,		WMP_F_SERDES },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1394 	  "DH89XXCC 1000BASE-KX Ethernet",
   1395 	  WM_T_82580,		WMP_F_SERDES },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1398 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1399 	  WM_T_82580,		WMP_F_SERDES },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1402 	  "I350 Gigabit Network Connection",
   1403 	  WM_T_I350,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1406 	  "I350 Gigabit Fiber Network Connection",
   1407 	  WM_T_I350,		WMP_F_FIBER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1410 	  "I350 Gigabit Backplane Connection",
   1411 	  WM_T_I350,		WMP_F_SERDES },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1414 	  "I350 Quad Port Gigabit Ethernet",
   1415 	  WM_T_I350,		WMP_F_SERDES },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1418 	  "I350 Gigabit Connection",
   1419 	  WM_T_I350,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1422 	  "I354 Gigabit Ethernet (KX)",
   1423 	  WM_T_I354,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1426 	  "I354 Gigabit Ethernet (SGMII)",
   1427 	  WM_T_I354,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1430 	  "I354 Gigabit Ethernet (2.5G)",
   1431 	  WM_T_I354,		WMP_F_COPPER },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1434 	  "I210-T1 Ethernet Server Adapter",
   1435 	  WM_T_I210,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1438 	  "I210 Ethernet (Copper OEM)",
   1439 	  WM_T_I210,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1442 	  "I210 Ethernet (Copper IT)",
   1443 	  WM_T_I210,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1446 	  "I210 Ethernet (FLASH less)",
   1447 	  WM_T_I210,		WMP_F_COPPER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1450 	  "I210 Gigabit Ethernet (Fiber)",
   1451 	  WM_T_I210,		WMP_F_FIBER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1454 	  "I210 Gigabit Ethernet (SERDES)",
   1455 	  WM_T_I210,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1458 	  "I210 Gigabit Ethernet (FLASH less)",
   1459 	  WM_T_I210,		WMP_F_SERDES },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1462 	  "I210 Gigabit Ethernet (SGMII)",
   1463 	  WM_T_I210,		WMP_F_COPPER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1466 	  "I211 Ethernet (COPPER)",
   1467 	  WM_T_I211,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1469 	  "I217 V Ethernet Connection",
   1470 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1472 	  "I217 LM Ethernet Connection",
   1473 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1475 	  "I218 V Ethernet Connection",
   1476 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1478 	  "I218 V Ethernet Connection",
   1479 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1481 	  "I218 V Ethernet Connection",
   1482 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1484 	  "I218 LM Ethernet Connection",
   1485 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1487 	  "I218 LM Ethernet Connection",
   1488 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1490 	  "I218 LM Ethernet Connection",
   1491 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1493 	  "I219 V Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1496 	  "I219 V Ethernet Connection",
   1497 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1499 	  "I219 V Ethernet Connection",
   1500 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1502 	  "I219 V Ethernet Connection",
   1503 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1505 	  "I219 LM Ethernet Connection",
   1506 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1508 	  "I219 LM Ethernet Connection",
   1509 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1511 	  "I219 LM Ethernet Connection",
   1512 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1514 	  "I219 LM Ethernet Connection",
   1515 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1517 	  "I219 LM Ethernet Connection",
   1518 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1520 	  "I219 V Ethernet Connection",
   1521 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1523 	  "I219 V Ethernet Connection",
   1524 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1526 	  "I219 LM Ethernet Connection",
   1527 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1529 	  "I219 LM Ethernet Connection",
   1530 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1531 	{ 0,			0,
   1532 	  NULL,
   1533 	  0,			0 },
   1534 };
   1535 
   1536 /*
   1537  * Register read/write functions.
   1538  * Other than CSR_{READ|WRITE}().
   1539  */
   1540 
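/*
 * Note on the I/O-mapped access used by wm_io_read()/wm_io_write() below:
 * the I/O BAR exposes an indirect two-register window, where a register
 * offset written at byte 0 selects the target register and byte 4 then
 * carries that register's data.
 */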
   1541 #if 0 /* Not currently used */
   1542 static inline uint32_t
   1543 wm_io_read(struct wm_softc *sc, int reg)
   1544 {
   1545 
   1546 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1547 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1548 }
   1549 #endif
   1550 
   1551 static inline void
   1552 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1553 {
   1554 
   1555 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1556 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1557 }
   1558 
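/*
 * wm_82575_write_8bit_ctlr_reg() performs an indirect 8-bit write on
 * 82575-family controllers: the data and target offset are packed into a
 * single register write, and the hardware sets a READY bit once the access
 * completes, which is polled below with a bounded timeout.
 */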
   1559 static inline void
   1560 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1561     uint32_t data)
   1562 {
   1563 	uint32_t regval;
   1564 	int i;
   1565 
   1566 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1567 
   1568 	CSR_WRITE(sc, reg, regval);
   1569 
   1570 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1571 		delay(5);
   1572 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1573 			break;
   1574 	}
   1575 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1576 		aprint_error("%s: WARNING:"
   1577 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1578 		    device_xname(sc->sc_dev), reg);
   1579 	}
   1580 }
   1581 
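/*
 * wm_set_dma_addr() splits a bus address into the little-endian low/high
 * 32-bit halves of a descriptor address; where bus_addr_t is only 32 bits
 * wide, the high half is simply zero.
 */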
   1582 static inline void
   1583 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1584 {
   1585 	wa->wa_low = htole32(v & 0xffffffffU);
   1586 	if (sizeof(bus_addr_t) == 8)
   1587 		wa->wa_high = htole32((uint64_t) v >> 32);
   1588 	else
   1589 		wa->wa_high = 0;
   1590 }
   1591 
   1592 /*
   1593  * Descriptor sync/init functions.
   1594  */
   1595 static inline void
   1596 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1597 {
   1598 	struct wm_softc *sc = txq->txq_sc;
   1599 
   1600 	/* If it will wrap around, sync to the end of the ring. */
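	/*
	 * For example, on a 256-descriptor ring with start == 250 and
	 * num == 10, this first syncs descriptors 250-255 and then falls
	 * through to sync descriptors 0-3 below.
	 */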
   1601 	if ((start + num) > WM_NTXDESC(txq)) {
   1602 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1603 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1604 		    (WM_NTXDESC(txq) - start), ops);
   1605 		num -= (WM_NTXDESC(txq) - start);
   1606 		start = 0;
   1607 	}
   1608 
   1609 	/* Now sync whatever is left. */
   1610 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1611 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1612 }
   1613 
   1614 static inline void
   1615 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1616 {
   1617 	struct wm_softc *sc = rxq->rxq_sc;
   1618 
   1619 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1620 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1621 }
   1622 
   1623 static inline void
   1624 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1625 {
   1626 	struct wm_softc *sc = rxq->rxq_sc;
   1627 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1628 	struct mbuf *m = rxs->rxs_mbuf;
   1629 
   1630 	/*
   1631 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1632 	 * so that the payload after the Ethernet header is aligned
   1633 	 * to a 4-byte boundary.
    1634 	 *
   1635 	 * XXX BRAINDAMAGE ALERT!
   1636 	 * The stupid chip uses the same size for every buffer, which
   1637 	 * is set in the Receive Control register.  We are using the 2K
   1638 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1639 	 * reason, we can't "scoot" packets longer than the standard
   1640 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1641 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1642 	 * the upper layer copy the headers.
   1643 	 */
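	/*
	 * The 2-byte tweak works because the 14-byte Ethernet header then
	 * ends on a 4-byte boundary, so the IP header that follows it is
	 * naturally aligned.
	 */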
   1644 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1645 
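	/*
	 * Three receive descriptor formats are in use: the 82574's extended
	 * descriptors, the "new queue" descriptors of the 82575 and later,
	 * and the legacy wiseman descriptors for everything else.
	 */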
   1646 	if (sc->sc_type == WM_T_82574) {
   1647 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1648 		rxd->erx_data.erxd_addr =
   1649 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1650 		rxd->erx_data.erxd_dd = 0;
   1651 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1652 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1653 
   1654 		rxd->nqrx_data.nrxd_paddr =
   1655 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1656 		/* Currently, split header is not supported. */
   1657 		rxd->nqrx_data.nrxd_haddr = 0;
   1658 	} else {
   1659 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1660 
   1661 		wm_set_dma_addr(&rxd->wrx_addr,
   1662 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1663 		rxd->wrx_len = 0;
   1664 		rxd->wrx_cksum = 0;
   1665 		rxd->wrx_status = 0;
   1666 		rxd->wrx_errors = 0;
   1667 		rxd->wrx_special = 0;
   1668 	}
   1669 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1670 
   1671 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1672 }
   1673 
   1674 /*
   1675  * Device driver interface functions and commonly used functions.
   1676  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1677  */
   1678 
   1679 /* Lookup supported device table */
   1680 static const struct wm_product *
   1681 wm_lookup(const struct pci_attach_args *pa)
   1682 {
   1683 	const struct wm_product *wmp;
   1684 
   1685 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1686 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1687 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1688 			return wmp;
   1689 	}
   1690 	return NULL;
   1691 }
   1692 
   1693 /* The match function (ca_match) */
   1694 static int
   1695 wm_match(device_t parent, cfdata_t cf, void *aux)
   1696 {
   1697 	struct pci_attach_args *pa = aux;
   1698 
   1699 	if (wm_lookup(pa) != NULL)
   1700 		return 1;
   1701 
   1702 	return 0;
   1703 }
   1704 
   1705 /* The attach function (ca_attach) */
   1706 static void
   1707 wm_attach(device_t parent, device_t self, void *aux)
   1708 {
   1709 	struct wm_softc *sc = device_private(self);
   1710 	struct pci_attach_args *pa = aux;
   1711 	prop_dictionary_t dict;
   1712 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1713 	pci_chipset_tag_t pc = pa->pa_pc;
   1714 	int counts[PCI_INTR_TYPE_SIZE];
   1715 	pci_intr_type_t max_type;
   1716 	const char *eetype, *xname;
   1717 	bus_space_tag_t memt;
   1718 	bus_space_handle_t memh;
   1719 	bus_size_t memsize;
   1720 	int memh_valid;
   1721 	int i, error;
   1722 	const struct wm_product *wmp;
   1723 	prop_data_t ea;
   1724 	prop_number_t pn;
   1725 	uint8_t enaddr[ETHER_ADDR_LEN];
   1726 	char buf[256];
   1727 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1728 	pcireg_t preg, memtype;
   1729 	uint16_t eeprom_data, apme_mask;
   1730 	bool force_clear_smbi;
   1731 	uint32_t link_mode;
   1732 	uint32_t reg;
   1733 
   1734 	sc->sc_dev = self;
   1735 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1736 	sc->sc_core_stopping = false;
   1737 
   1738 	wmp = wm_lookup(pa);
   1739 #ifdef DIAGNOSTIC
   1740 	if (wmp == NULL) {
   1741 		printf("\n");
   1742 		panic("wm_attach: impossible");
   1743 	}
   1744 #endif
   1745 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1746 
   1747 	sc->sc_pc = pa->pa_pc;
   1748 	sc->sc_pcitag = pa->pa_tag;
   1749 
   1750 	if (pci_dma64_available(pa))
   1751 		sc->sc_dmat = pa->pa_dmat64;
   1752 	else
   1753 		sc->sc_dmat = pa->pa_dmat;
   1754 
   1755 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1756 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1757 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1758 
   1759 	sc->sc_type = wmp->wmp_type;
   1760 
   1761 	/* Set default function pointers */
   1762 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1763 	sc->phy.release = sc->nvm.release = wm_put_null;
   1764 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1765 
   1766 	if (sc->sc_type < WM_T_82543) {
   1767 		if (sc->sc_rev < 2) {
   1768 			aprint_error_dev(sc->sc_dev,
   1769 			    "i82542 must be at least rev. 2\n");
   1770 			return;
   1771 		}
   1772 		if (sc->sc_rev < 3)
   1773 			sc->sc_type = WM_T_82542_2_0;
   1774 	}
   1775 
   1776 	/*
   1777 	 * Disable MSI for Errata:
   1778 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1779 	 *
   1780 	 *  82544: Errata 25
   1781 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1782 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1783 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1784 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1785 	 *
   1786 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1787 	 *
   1788 	 *  82571 & 82572: Errata 63
   1789 	 */
   1790 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1791 	    || (sc->sc_type == WM_T_82572))
   1792 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1793 
   1794 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1795 	    || (sc->sc_type == WM_T_82580)
   1796 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1797 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1798 		sc->sc_flags |= WM_F_NEWQUEUE;
   1799 
   1800 	/* Set device properties (mactype) */
   1801 	dict = device_properties(sc->sc_dev);
   1802 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1803 
   1804 	/*
    1805 	 * Map the device.  All devices support memory-mapped access,
   1806 	 * and it is really required for normal operation.
   1807 	 */
   1808 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1809 	switch (memtype) {
   1810 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1811 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1812 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1813 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1814 		break;
   1815 	default:
   1816 		memh_valid = 0;
   1817 		break;
   1818 	}
   1819 
   1820 	if (memh_valid) {
   1821 		sc->sc_st = memt;
   1822 		sc->sc_sh = memh;
   1823 		sc->sc_ss = memsize;
   1824 	} else {
   1825 		aprint_error_dev(sc->sc_dev,
   1826 		    "unable to map device registers\n");
   1827 		return;
   1828 	}
   1829 
   1830 	/*
   1831 	 * In addition, i82544 and later support I/O mapped indirect
   1832 	 * register access.  It is not desirable (nor supported in
   1833 	 * this driver) to use it for normal operation, though it is
   1834 	 * required to work around bugs in some chip versions.
   1835 	 */
   1836 	if (sc->sc_type >= WM_T_82544) {
   1837 		/* First we have to find the I/O BAR. */
   1838 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1839 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1840 			if (memtype == PCI_MAPREG_TYPE_IO)
   1841 				break;
   1842 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1843 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1844 				i += 4;	/* skip high bits, too */
   1845 		}
   1846 		if (i < PCI_MAPREG_END) {
   1847 			/*
    1848 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1849 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1850 			 * That's no problem, because newer chips don't have
    1851 			 * this bug.
    1852 			 *
    1853 			 * The i8254x apparently doesn't respond when the
    1854 			 * I/O BAR is 0, which looks as if it hasn't been
    1855 			 * configured.
   1856 			 */
   1857 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1858 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1859 				aprint_error_dev(sc->sc_dev,
   1860 				    "WARNING: I/O BAR at zero.\n");
   1861 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1862 					0, &sc->sc_iot, &sc->sc_ioh,
   1863 					NULL, &sc->sc_ios) == 0) {
   1864 				sc->sc_flags |= WM_F_IOH_VALID;
   1865 			} else {
   1866 				aprint_error_dev(sc->sc_dev,
   1867 				    "WARNING: unable to map I/O space\n");
   1868 			}
   1869 		}
   1870 
   1871 	}
   1872 
   1873 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1874 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1875 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1876 	if (sc->sc_type < WM_T_82542_2_1)
   1877 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1878 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1879 
   1880 	/* power up chip */
   1881 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1882 	    NULL)) && error != EOPNOTSUPP) {
   1883 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1884 		return;
   1885 	}
   1886 
   1887 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1888 	/*
    1889 	 * Don't use MSI-X if we can use only one queue, in order to
    1890 	 * save interrupt resources.
   1891 	 */
   1892 	if (sc->sc_nqueues > 1) {
   1893 		max_type = PCI_INTR_TYPE_MSIX;
   1894 		/*
    1895 		 * The 82583 has an MSI-X capability in its PCI configuration
    1896 		 * space but doesn't actually support MSI-X; at least the
    1897 		 * documentation says nothing about it.
   1898 		 */
   1899 		counts[PCI_INTR_TYPE_MSIX]
   1900 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1901 	} else {
   1902 		max_type = PCI_INTR_TYPE_MSI;
   1903 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1904 	}
   1905 
   1906 	/* Allocation settings */
   1907 	counts[PCI_INTR_TYPE_MSI] = 1;
   1908 	counts[PCI_INTR_TYPE_INTX] = 1;
   1909 	/* overridden by disable flags */
   1910 	if (wm_disable_msi != 0) {
   1911 		counts[PCI_INTR_TYPE_MSI] = 0;
   1912 		if (wm_disable_msix != 0) {
   1913 			max_type = PCI_INTR_TYPE_INTX;
   1914 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1915 		}
   1916 	} else if (wm_disable_msix != 0) {
   1917 		max_type = PCI_INTR_TYPE_MSI;
   1918 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1919 	}
   1920 
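	/*
	 * The allocation below falls back in steps: if MSI-X setup fails,
	 * retry with MSI; if MSI setup fails, retry with INTx.  The vectors
	 * allocated for a failed attempt are released at each step.
	 */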
   1921 alloc_retry:
   1922 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1923 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1924 		return;
   1925 	}
   1926 
   1927 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1928 		error = wm_setup_msix(sc);
   1929 		if (error) {
   1930 			pci_intr_release(pc, sc->sc_intrs,
   1931 			    counts[PCI_INTR_TYPE_MSIX]);
   1932 
   1933 			/* Setup for MSI: Disable MSI-X */
   1934 			max_type = PCI_INTR_TYPE_MSI;
   1935 			counts[PCI_INTR_TYPE_MSI] = 1;
   1936 			counts[PCI_INTR_TYPE_INTX] = 1;
   1937 			goto alloc_retry;
   1938 		}
    1939 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1940 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1941 		error = wm_setup_legacy(sc);
   1942 		if (error) {
   1943 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1944 			    counts[PCI_INTR_TYPE_MSI]);
   1945 
   1946 			/* The next try is for INTx: Disable MSI */
   1947 			max_type = PCI_INTR_TYPE_INTX;
   1948 			counts[PCI_INTR_TYPE_INTX] = 1;
   1949 			goto alloc_retry;
   1950 		}
   1951 	} else {
   1952 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1953 		error = wm_setup_legacy(sc);
   1954 		if (error) {
   1955 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1956 			    counts[PCI_INTR_TYPE_INTX]);
   1957 			return;
   1958 		}
   1959 	}
   1960 
   1961 	/*
   1962 	 * Check the function ID (unit number of the chip).
   1963 	 */
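	/*
	 * Multi-port controllers report, in the STATUS register, which
	 * port of the chip this function is; single-port parts are always
	 * function 0.
	 */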
   1964 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1965 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1966 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1967 	    || (sc->sc_type == WM_T_82580)
   1968 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1969 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1970 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1971 	else
   1972 		sc->sc_funcid = 0;
   1973 
   1974 	/*
   1975 	 * Determine a few things about the bus we're connected to.
   1976 	 */
   1977 	if (sc->sc_type < WM_T_82543) {
   1978 		/* We don't really know the bus characteristics here. */
   1979 		sc->sc_bus_speed = 33;
   1980 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1981 		/*
    1982 		 * CSA (Communication Streaming Architecture) is about as fast
    1983 		 * as a 32-bit 66MHz PCI bus.
   1984 		 */
   1985 		sc->sc_flags |= WM_F_CSA;
   1986 		sc->sc_bus_speed = 66;
   1987 		aprint_verbose_dev(sc->sc_dev,
   1988 		    "Communication Streaming Architecture\n");
   1989 		if (sc->sc_type == WM_T_82547) {
   1990 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1991 			callout_setfunc(&sc->sc_txfifo_ch,
   1992 					wm_82547_txfifo_stall, sc);
   1993 			aprint_verbose_dev(sc->sc_dev,
   1994 			    "using 82547 Tx FIFO stall work-around\n");
   1995 		}
   1996 	} else if (sc->sc_type >= WM_T_82571) {
   1997 		sc->sc_flags |= WM_F_PCIE;
   1998 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1999 		    && (sc->sc_type != WM_T_ICH10)
   2000 		    && (sc->sc_type != WM_T_PCH)
   2001 		    && (sc->sc_type != WM_T_PCH2)
   2002 		    && (sc->sc_type != WM_T_PCH_LPT)
   2003 		    && (sc->sc_type != WM_T_PCH_SPT)
   2004 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2005 			/* ICH* and PCH* have no PCIe capability registers */
   2006 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2007 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2008 				NULL) == 0)
   2009 				aprint_error_dev(sc->sc_dev,
   2010 				    "unable to find PCIe capability\n");
   2011 		}
   2012 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2013 	} else {
   2014 		reg = CSR_READ(sc, WMREG_STATUS);
   2015 		if (reg & STATUS_BUS64)
   2016 			sc->sc_flags |= WM_F_BUS64;
   2017 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2018 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2019 
   2020 			sc->sc_flags |= WM_F_PCIX;
   2021 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2022 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2023 				aprint_error_dev(sc->sc_dev,
   2024 				    "unable to find PCIX capability\n");
   2025 			else if (sc->sc_type != WM_T_82545_3 &&
   2026 				 sc->sc_type != WM_T_82546_3) {
   2027 				/*
   2028 				 * Work around a problem caused by the BIOS
   2029 				 * setting the max memory read byte count
   2030 				 * incorrectly.
   2031 				 */
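				/*
				 * MMRBC is encoded as 512 << n bytes; if
				 * the command register asks for more than
				 * the status register says the device
				 * supports, clamp it to the supported
				 * maximum below.
				 */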
   2032 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2033 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2034 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2035 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2036 
   2037 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2038 				    PCIX_CMD_BYTECNT_SHIFT;
   2039 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2040 				    PCIX_STATUS_MAXB_SHIFT;
   2041 				if (bytecnt > maxb) {
   2042 					aprint_verbose_dev(sc->sc_dev,
   2043 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2044 					    512 << bytecnt, 512 << maxb);
   2045 					pcix_cmd = (pcix_cmd &
   2046 					    ~PCIX_CMD_BYTECNT_MASK) |
   2047 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2048 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2049 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2050 					    pcix_cmd);
   2051 				}
   2052 			}
   2053 		}
   2054 		/*
   2055 		 * The quad port adapter is special; it has a PCIX-PCIX
   2056 		 * bridge on the board, and can run the secondary bus at
   2057 		 * a higher speed.
   2058 		 */
   2059 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2060 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2061 								      : 66;
   2062 		} else if (sc->sc_flags & WM_F_PCIX) {
   2063 			switch (reg & STATUS_PCIXSPD_MASK) {
   2064 			case STATUS_PCIXSPD_50_66:
   2065 				sc->sc_bus_speed = 66;
   2066 				break;
   2067 			case STATUS_PCIXSPD_66_100:
   2068 				sc->sc_bus_speed = 100;
   2069 				break;
   2070 			case STATUS_PCIXSPD_100_133:
   2071 				sc->sc_bus_speed = 133;
   2072 				break;
   2073 			default:
   2074 				aprint_error_dev(sc->sc_dev,
   2075 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2076 				    reg & STATUS_PCIXSPD_MASK);
   2077 				sc->sc_bus_speed = 66;
   2078 				break;
   2079 			}
   2080 		} else
   2081 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2082 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2083 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2084 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2085 	}
   2086 
    2087 	/* Disable ASPM L0s and/or L1 as a workaround */
   2088 	wm_disable_aspm(sc);
   2089 
   2090 	/* clear interesting stat counters */
   2091 	CSR_READ(sc, WMREG_COLC);
   2092 	CSR_READ(sc, WMREG_RXERRC);
   2093 
   2094 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2095 	    || (sc->sc_type >= WM_T_ICH8))
   2096 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2097 	if (sc->sc_type >= WM_T_ICH8)
   2098 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2099 
   2100 	/* Set PHY, NVM mutex related stuff */
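	/*
	 * Per chip generation, the switch below selects how the NVM is
	 * read (Microwire, SPI, the EERD register, ICH/PCH FLASH or iNVM)
	 * and which acquire/release pairs serialize PHY and NVM access.
	 */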
   2101 	switch (sc->sc_type) {
   2102 	case WM_T_82542_2_0:
   2103 	case WM_T_82542_2_1:
   2104 	case WM_T_82543:
   2105 	case WM_T_82544:
   2106 		/* Microwire */
   2107 		sc->nvm.read = wm_nvm_read_uwire;
   2108 		sc->sc_nvm_wordsize = 64;
   2109 		sc->sc_nvm_addrbits = 6;
   2110 		break;
   2111 	case WM_T_82540:
   2112 	case WM_T_82545:
   2113 	case WM_T_82545_3:
   2114 	case WM_T_82546:
   2115 	case WM_T_82546_3:
   2116 		/* Microwire */
   2117 		sc->nvm.read = wm_nvm_read_uwire;
   2118 		reg = CSR_READ(sc, WMREG_EECD);
   2119 		if (reg & EECD_EE_SIZE) {
   2120 			sc->sc_nvm_wordsize = 256;
   2121 			sc->sc_nvm_addrbits = 8;
   2122 		} else {
   2123 			sc->sc_nvm_wordsize = 64;
   2124 			sc->sc_nvm_addrbits = 6;
   2125 		}
   2126 		sc->sc_flags |= WM_F_LOCK_EECD;
   2127 		sc->nvm.acquire = wm_get_eecd;
   2128 		sc->nvm.release = wm_put_eecd;
   2129 		break;
   2130 	case WM_T_82541:
   2131 	case WM_T_82541_2:
   2132 	case WM_T_82547:
   2133 	case WM_T_82547_2:
   2134 		reg = CSR_READ(sc, WMREG_EECD);
   2135 		/*
    2136 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2137 		 * 8254[17], so set the flags and functions before calling it.
   2138 		 */
   2139 		sc->sc_flags |= WM_F_LOCK_EECD;
   2140 		sc->nvm.acquire = wm_get_eecd;
   2141 		sc->nvm.release = wm_put_eecd;
   2142 		if (reg & EECD_EE_TYPE) {
   2143 			/* SPI */
   2144 			sc->nvm.read = wm_nvm_read_spi;
   2145 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2146 			wm_nvm_set_addrbits_size_eecd(sc);
   2147 		} else {
   2148 			/* Microwire */
   2149 			sc->nvm.read = wm_nvm_read_uwire;
   2150 			if ((reg & EECD_EE_ABITS) != 0) {
   2151 				sc->sc_nvm_wordsize = 256;
   2152 				sc->sc_nvm_addrbits = 8;
   2153 			} else {
   2154 				sc->sc_nvm_wordsize = 64;
   2155 				sc->sc_nvm_addrbits = 6;
   2156 			}
   2157 		}
   2158 		break;
   2159 	case WM_T_82571:
   2160 	case WM_T_82572:
   2161 		/* SPI */
   2162 		sc->nvm.read = wm_nvm_read_eerd;
    2163 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2164 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2165 		wm_nvm_set_addrbits_size_eecd(sc);
   2166 		sc->phy.acquire = wm_get_swsm_semaphore;
   2167 		sc->phy.release = wm_put_swsm_semaphore;
   2168 		sc->nvm.acquire = wm_get_nvm_82571;
   2169 		sc->nvm.release = wm_put_nvm_82571;
   2170 		break;
   2171 	case WM_T_82573:
   2172 	case WM_T_82574:
   2173 	case WM_T_82583:
   2174 		sc->nvm.read = wm_nvm_read_eerd;
    2175 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2176 		if (sc->sc_type == WM_T_82573) {
   2177 			sc->phy.acquire = wm_get_swsm_semaphore;
   2178 			sc->phy.release = wm_put_swsm_semaphore;
   2179 			sc->nvm.acquire = wm_get_nvm_82571;
   2180 			sc->nvm.release = wm_put_nvm_82571;
   2181 		} else {
   2182 			/* Both PHY and NVM use the same semaphore. */
   2183 			sc->phy.acquire = sc->nvm.acquire
   2184 			    = wm_get_swfwhw_semaphore;
   2185 			sc->phy.release = sc->nvm.release
   2186 			    = wm_put_swfwhw_semaphore;
   2187 		}
   2188 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2189 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2190 			sc->sc_nvm_wordsize = 2048;
   2191 		} else {
   2192 			/* SPI */
   2193 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2194 			wm_nvm_set_addrbits_size_eecd(sc);
   2195 		}
   2196 		break;
   2197 	case WM_T_82575:
   2198 	case WM_T_82576:
   2199 	case WM_T_82580:
   2200 	case WM_T_I350:
   2201 	case WM_T_I354:
   2202 	case WM_T_80003:
   2203 		/* SPI */
   2204 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2205 		wm_nvm_set_addrbits_size_eecd(sc);
   2206 		if ((sc->sc_type == WM_T_80003)
   2207 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2208 			sc->nvm.read = wm_nvm_read_eerd;
   2209 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2210 		} else {
   2211 			sc->nvm.read = wm_nvm_read_spi;
   2212 			sc->sc_flags |= WM_F_LOCK_EECD;
   2213 		}
   2214 		sc->phy.acquire = wm_get_phy_82575;
   2215 		sc->phy.release = wm_put_phy_82575;
   2216 		sc->nvm.acquire = wm_get_nvm_80003;
   2217 		sc->nvm.release = wm_put_nvm_80003;
   2218 		break;
   2219 	case WM_T_ICH8:
   2220 	case WM_T_ICH9:
   2221 	case WM_T_ICH10:
   2222 	case WM_T_PCH:
   2223 	case WM_T_PCH2:
   2224 	case WM_T_PCH_LPT:
   2225 		sc->nvm.read = wm_nvm_read_ich8;
   2226 		/* FLASH */
   2227 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2228 		sc->sc_nvm_wordsize = 2048;
   2229 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2230 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2231 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2232 			aprint_error_dev(sc->sc_dev,
   2233 			    "can't map FLASH registers\n");
   2234 			goto out;
   2235 		}
   2236 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2237 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2238 		    ICH_FLASH_SECTOR_SIZE;
   2239 		sc->sc_ich8_flash_bank_size =
   2240 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2241 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2242 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2243 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
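		/*
		 * GFPREG encodes the first and last 4KB flash sectors of
		 * the NVM region; the arithmetic above converts that
		 * region size into 16-bit words and halves it, since the
		 * region holds two banks.
		 */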
   2244 		sc->sc_flashreg_offset = 0;
   2245 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2246 		sc->phy.release = wm_put_swflag_ich8lan;
   2247 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2248 		sc->nvm.release = wm_put_nvm_ich8lan;
   2249 		break;
   2250 	case WM_T_PCH_SPT:
   2251 	case WM_T_PCH_CNP:
   2252 		sc->nvm.read = wm_nvm_read_spt;
   2253 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2254 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2255 		sc->sc_flasht = sc->sc_st;
   2256 		sc->sc_flashh = sc->sc_sh;
   2257 		sc->sc_ich8_flash_base = 0;
   2258 		sc->sc_nvm_wordsize =
   2259 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2260 			* NVM_SIZE_MULTIPLIER;
    2261 		/* The size is in bytes; we want it in words. */
   2262 		sc->sc_nvm_wordsize /= 2;
   2263 		/* assume 2 banks */
   2264 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2265 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2266 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2267 		sc->phy.release = wm_put_swflag_ich8lan;
   2268 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2269 		sc->nvm.release = wm_put_nvm_ich8lan;
   2270 		break;
   2271 	case WM_T_I210:
   2272 	case WM_T_I211:
    2273 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2274 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2275 		if (wm_nvm_flash_presence_i210(sc)) {
   2276 			sc->nvm.read = wm_nvm_read_eerd;
   2277 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2278 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2279 			wm_nvm_set_addrbits_size_eecd(sc);
   2280 		} else {
   2281 			sc->nvm.read = wm_nvm_read_invm;
   2282 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2283 			sc->sc_nvm_wordsize = INVM_SIZE;
   2284 		}
   2285 		sc->phy.acquire = wm_get_phy_82575;
   2286 		sc->phy.release = wm_put_phy_82575;
   2287 		sc->nvm.acquire = wm_get_nvm_80003;
   2288 		sc->nvm.release = wm_put_nvm_80003;
   2289 		break;
   2290 	default:
   2291 		break;
   2292 	}
   2293 
   2294 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2295 	switch (sc->sc_type) {
   2296 	case WM_T_82571:
   2297 	case WM_T_82572:
   2298 		reg = CSR_READ(sc, WMREG_SWSM2);
   2299 		if ((reg & SWSM2_LOCK) == 0) {
   2300 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2301 			force_clear_smbi = true;
   2302 		} else
   2303 			force_clear_smbi = false;
   2304 		break;
   2305 	case WM_T_82573:
   2306 	case WM_T_82574:
   2307 	case WM_T_82583:
   2308 		force_clear_smbi = true;
   2309 		break;
   2310 	default:
   2311 		force_clear_smbi = false;
   2312 		break;
   2313 	}
   2314 	if (force_clear_smbi) {
   2315 		reg = CSR_READ(sc, WMREG_SWSM);
   2316 		if ((reg & SWSM_SMBI) != 0)
   2317 			aprint_error_dev(sc->sc_dev,
   2318 			    "Please update the Bootagent\n");
   2319 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2320 	}
   2321 
   2322 	/*
    2323 	 * Defer printing the EEPROM type until after verifying the checksum.
   2324 	 * This allows the EEPROM type to be printed correctly in the case
   2325 	 * that no EEPROM is attached.
   2326 	 */
   2327 	/*
   2328 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2329 	 * this for later, so we can fail future reads from the EEPROM.
   2330 	 */
   2331 	if (wm_nvm_validate_checksum(sc)) {
   2332 		/*
    2333 		 * Check it again, because some PCI-e parts fail the
    2334 		 * first check due to the link being in a sleep state.
   2335 		 */
   2336 		if (wm_nvm_validate_checksum(sc))
   2337 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2338 	}
   2339 
   2340 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2341 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2342 	else {
   2343 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2344 		    sc->sc_nvm_wordsize);
   2345 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2346 			aprint_verbose("iNVM");
   2347 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2348 			aprint_verbose("FLASH(HW)");
   2349 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2350 			aprint_verbose("FLASH");
   2351 		else {
   2352 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2353 				eetype = "SPI";
   2354 			else
   2355 				eetype = "MicroWire";
   2356 			aprint_verbose("(%d address bits) %s EEPROM",
   2357 			    sc->sc_nvm_addrbits, eetype);
   2358 		}
   2359 	}
   2360 	wm_nvm_version(sc);
   2361 	aprint_verbose("\n");
   2362 
   2363 	/*
    2364 	 * XXX The first call to wm_gmii_setup_phytype(). The result might be
   2365 	 * incorrect.
   2366 	 */
   2367 	wm_gmii_setup_phytype(sc, 0, 0);
   2368 
   2369 	/* Reset the chip to a known state. */
   2370 	wm_reset(sc);
   2371 
   2372 	/*
   2373 	 * Check for I21[01] PLL workaround.
   2374 	 *
   2375 	 * Three cases:
   2376 	 * a) Chip is I211.
   2377 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2378 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2379 	 */
   2380 	if (sc->sc_type == WM_T_I211)
   2381 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2382 	if (sc->sc_type == WM_T_I210) {
   2383 		if (!wm_nvm_flash_presence_i210(sc))
   2384 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2385 		else if ((sc->sc_nvm_ver_major < 3)
   2386 		    || ((sc->sc_nvm_ver_major == 3)
   2387 			&& (sc->sc_nvm_ver_minor < 25))) {
   2388 			aprint_verbose_dev(sc->sc_dev,
   2389 			    "ROM image version %d.%d is older than 3.25\n",
   2390 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2391 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2392 		}
   2393 	}
   2394 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2395 		wm_pll_workaround_i210(sc);
   2396 
   2397 	wm_get_wakeup(sc);
   2398 
   2399 	/* Non-AMT based hardware can now take control from firmware */
   2400 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2401 		wm_get_hw_control(sc);
   2402 
   2403 	/*
    2404 	 * Read the Ethernet address from the EEPROM, unless it was
    2405 	 * already found in the device properties.
   2406 	 */
   2407 	ea = prop_dictionary_get(dict, "mac-address");
   2408 	if (ea != NULL) {
   2409 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2410 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2411 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2412 	} else {
   2413 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2414 			aprint_error_dev(sc->sc_dev,
   2415 			    "unable to read Ethernet address\n");
   2416 			goto out;
   2417 		}
   2418 	}
   2419 
   2420 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2421 	    ether_sprintf(enaddr));
   2422 
   2423 	/*
   2424 	 * Read the config info from the EEPROM, and set up various
   2425 	 * bits in the control registers based on their contents.
   2426 	 */
   2427 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2428 	if (pn != NULL) {
   2429 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2430 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2431 	} else {
   2432 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2433 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2434 			goto out;
   2435 		}
   2436 	}
   2437 
   2438 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2439 	if (pn != NULL) {
   2440 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2441 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2442 	} else {
   2443 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2444 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2445 			goto out;
   2446 		}
   2447 	}
   2448 
   2449 	/* check for WM_F_WOL */
   2450 	switch (sc->sc_type) {
   2451 	case WM_T_82542_2_0:
   2452 	case WM_T_82542_2_1:
   2453 	case WM_T_82543:
   2454 		/* dummy? */
   2455 		eeprom_data = 0;
   2456 		apme_mask = NVM_CFG3_APME;
   2457 		break;
   2458 	case WM_T_82544:
   2459 		apme_mask = NVM_CFG2_82544_APM_EN;
   2460 		eeprom_data = cfg2;
   2461 		break;
   2462 	case WM_T_82546:
   2463 	case WM_T_82546_3:
   2464 	case WM_T_82571:
   2465 	case WM_T_82572:
   2466 	case WM_T_82573:
   2467 	case WM_T_82574:
   2468 	case WM_T_82583:
   2469 	case WM_T_80003:
   2470 	default:
   2471 		apme_mask = NVM_CFG3_APME;
   2472 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2473 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2474 		break;
   2475 	case WM_T_82575:
   2476 	case WM_T_82576:
   2477 	case WM_T_82580:
   2478 	case WM_T_I350:
   2479 	case WM_T_I354: /* XXX ok? */
   2480 	case WM_T_ICH8:
   2481 	case WM_T_ICH9:
   2482 	case WM_T_ICH10:
   2483 	case WM_T_PCH:
   2484 	case WM_T_PCH2:
   2485 	case WM_T_PCH_LPT:
   2486 	case WM_T_PCH_SPT:
   2487 	case WM_T_PCH_CNP:
   2488 		/* XXX The funcid should be checked on some devices */
   2489 		apme_mask = WUC_APME;
   2490 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2491 		break;
   2492 	}
   2493 
    2494 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2495 	if ((eeprom_data & apme_mask) != 0)
   2496 		sc->sc_flags |= WM_F_WOL;
   2497 
   2498 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2499 		/* Check NVM for autonegotiation */
   2500 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2501 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2502 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2503 		}
   2504 	}
   2505 
   2506 	/*
    2507 	 * XXX Some multi-port cards need special handling to disable
    2508 	 * a particular port.
   2509 	 */
   2510 
   2511 	if (sc->sc_type >= WM_T_82544) {
   2512 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2513 		if (pn != NULL) {
   2514 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2515 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2516 		} else {
   2517 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2518 				aprint_error_dev(sc->sc_dev,
   2519 				    "unable to read SWDPIN\n");
   2520 				goto out;
   2521 			}
   2522 		}
   2523 	}
   2524 
   2525 	if (cfg1 & NVM_CFG1_ILOS)
   2526 		sc->sc_ctrl |= CTRL_ILOS;
   2527 
   2528 	/*
   2529 	 * XXX
    2530 	 * This code isn't correct, because pins 2 and 3 are located in
    2531 	 * different positions on newer chips. Check all the datasheets.
    2532 	 *
    2533 	 * Until this is resolved, apply it only to chips up to the 82580.
   2534 	 */
   2535 	if (sc->sc_type <= WM_T_82580) {
   2536 		if (sc->sc_type >= WM_T_82544) {
   2537 			sc->sc_ctrl |=
   2538 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2539 			    CTRL_SWDPIO_SHIFT;
   2540 			sc->sc_ctrl |=
   2541 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2542 			    CTRL_SWDPINS_SHIFT;
   2543 		} else {
   2544 			sc->sc_ctrl |=
   2545 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2546 			    CTRL_SWDPIO_SHIFT;
   2547 		}
   2548 	}
   2549 
   2550 	/* XXX For other than 82580? */
   2551 	if (sc->sc_type == WM_T_82580) {
   2552 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2553 		if (nvmword & __BIT(13))
   2554 			sc->sc_ctrl |= CTRL_ILOS;
   2555 	}
   2556 
   2557 #if 0
   2558 	if (sc->sc_type >= WM_T_82544) {
   2559 		if (cfg1 & NVM_CFG1_IPS0)
   2560 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2561 		if (cfg1 & NVM_CFG1_IPS1)
   2562 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2563 		sc->sc_ctrl_ext |=
   2564 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2565 		    CTRL_EXT_SWDPIO_SHIFT;
   2566 		sc->sc_ctrl_ext |=
   2567 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2568 		    CTRL_EXT_SWDPINS_SHIFT;
   2569 	} else {
   2570 		sc->sc_ctrl_ext |=
   2571 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2572 		    CTRL_EXT_SWDPIO_SHIFT;
   2573 	}
   2574 #endif
   2575 
   2576 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2577 #if 0
   2578 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2579 #endif
   2580 
   2581 	if (sc->sc_type == WM_T_PCH) {
   2582 		uint16_t val;
   2583 
   2584 		/* Save the NVM K1 bit setting */
   2585 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2586 
   2587 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2588 			sc->sc_nvm_k1_enabled = 1;
   2589 		else
   2590 			sc->sc_nvm_k1_enabled = 0;
   2591 	}
   2592 
   2593 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
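	/*
	 * ICH/PCH parts and the 8257[34]/82583 are copper-only.  For the
	 * 82575/82576/82580 and I21[01]/I35[04] families the media type is
	 * derived from the CTRL_EXT link-mode field below; for older parts
	 * it follows the STATUS register's TBIMODE bit.
	 */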
   2594 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2595 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2596 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2597 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2598 	    || sc->sc_type == WM_T_82573
   2599 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2600 		/* Copper only */
   2601 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2602 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2603 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2604 	    || (sc->sc_type == WM_T_I211)) {
   2605 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2606 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2607 		switch (link_mode) {
   2608 		case CTRL_EXT_LINK_MODE_1000KX:
   2609 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2610 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2611 			break;
   2612 		case CTRL_EXT_LINK_MODE_SGMII:
   2613 			if (wm_sgmii_uses_mdio(sc)) {
   2614 				aprint_verbose_dev(sc->sc_dev,
   2615 				    "SGMII(MDIO)\n");
   2616 				sc->sc_flags |= WM_F_SGMII;
   2617 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2618 				break;
   2619 			}
   2620 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2621 			/*FALLTHROUGH*/
   2622 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2623 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2624 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2625 				if (link_mode
   2626 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2627 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2628 					sc->sc_flags |= WM_F_SGMII;
   2629 				} else {
   2630 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2631 					aprint_verbose_dev(sc->sc_dev,
   2632 					    "SERDES\n");
   2633 				}
   2634 				break;
   2635 			}
   2636 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2637 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2638 
   2639 			/* Change current link mode setting */
   2640 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2641 			switch (sc->sc_mediatype) {
   2642 			case WM_MEDIATYPE_COPPER:
   2643 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2644 				break;
   2645 			case WM_MEDIATYPE_SERDES:
   2646 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2647 				break;
   2648 			default:
   2649 				break;
   2650 			}
   2651 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2652 			break;
   2653 		case CTRL_EXT_LINK_MODE_GMII:
   2654 		default:
   2655 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2656 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2657 			break;
   2658 		}
   2659 
    2661 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2662 			reg |= CTRL_EXT_I2C_ENA;
    2663 		else
    2664 			reg &= ~CTRL_EXT_I2C_ENA;
   2665 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2666 	} else if (sc->sc_type < WM_T_82543 ||
   2667 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2668 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2669 			aprint_error_dev(sc->sc_dev,
   2670 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2671 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2672 		}
   2673 	} else {
   2674 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2675 			aprint_error_dev(sc->sc_dev,
   2676 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2677 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2678 		}
   2679 	}
   2680 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2681 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2682 
   2683 	/* Set device properties (macflags) */
   2684 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2685 
   2686 	/* Initialize the media structures accordingly. */
   2687 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2688 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2689 	else
   2690 		wm_tbi_mediainit(sc); /* All others */
   2691 
   2692 	ifp = &sc->sc_ethercom.ec_if;
   2693 	xname = device_xname(sc->sc_dev);
   2694 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2695 	ifp->if_softc = sc;
   2696 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2697 #ifdef WM_MPSAFE
   2698 	ifp->if_extflags = IFEF_MPSAFE;
   2699 #endif
   2700 	ifp->if_ioctl = wm_ioctl;
   2701 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2702 		ifp->if_start = wm_nq_start;
   2703 		/*
    2704 		 * When the number of CPUs is one and the controller can use
    2705 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2706 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2707 		 * other for link status changes.
    2708 		 * In this situation, wm_nq_transmit() is disadvantageous
    2709 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2710 		 */
   2711 		if (wm_is_using_multiqueue(sc))
   2712 			ifp->if_transmit = wm_nq_transmit;
   2713 	} else {
   2714 		ifp->if_start = wm_start;
   2715 		/*
    2716 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2717 		 */
   2718 		if (wm_is_using_multiqueue(sc))
   2719 			ifp->if_transmit = wm_transmit;
   2720 	}
    2721 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as a watchdog. */
   2722 	ifp->if_init = wm_init;
   2723 	ifp->if_stop = wm_stop;
   2724 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2725 	IFQ_SET_READY(&ifp->if_snd);
   2726 
   2727 	/* Check for jumbo frame */
   2728 	switch (sc->sc_type) {
   2729 	case WM_T_82573:
   2730 		/* XXX limited to 9234 if ASPM is disabled */
   2731 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2732 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2733 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2734 		break;
   2735 	case WM_T_82571:
   2736 	case WM_T_82572:
   2737 	case WM_T_82574:
   2738 	case WM_T_82583:
   2739 	case WM_T_82575:
   2740 	case WM_T_82576:
   2741 	case WM_T_82580:
   2742 	case WM_T_I350:
   2743 	case WM_T_I354:
   2744 	case WM_T_I210:
   2745 	case WM_T_I211:
   2746 	case WM_T_80003:
   2747 	case WM_T_ICH9:
   2748 	case WM_T_ICH10:
   2749 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2750 	case WM_T_PCH_LPT:
   2751 	case WM_T_PCH_SPT:
   2752 	case WM_T_PCH_CNP:
   2753 		/* XXX limited to 9234 */
   2754 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2755 		break;
   2756 	case WM_T_PCH:
   2757 		/* XXX limited to 4096 */
   2758 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2759 		break;
   2760 	case WM_T_82542_2_0:
   2761 	case WM_T_82542_2_1:
   2762 	case WM_T_ICH8:
   2763 		/* No support for jumbo frame */
   2764 		break;
   2765 	default:
   2766 		/* ETHER_MAX_LEN_JUMBO */
   2767 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2768 		break;
   2769 	}
   2770 
    2771 	/* If we're an i82543 or greater, we can support VLANs. */
   2772 	if (sc->sc_type >= WM_T_82543)
   2773 		sc->sc_ethercom.ec_capabilities |=
   2774 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2775 
   2776 	/*
    2777 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2778 	 * on i82543 and later.
   2779 	 */
   2780 	if (sc->sc_type >= WM_T_82543) {
   2781 		ifp->if_capabilities |=
   2782 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2783 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2784 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2785 		    IFCAP_CSUM_TCPv6_Tx |
   2786 		    IFCAP_CSUM_UDPv6_Tx;
   2787 	}
   2788 
   2789 	/*
    2790 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2791 	 *
   2792 	 *	82541GI (8086:1076) ... no
   2793 	 *	82572EI (8086:10b9) ... yes
   2794 	 */
   2795 	if (sc->sc_type >= WM_T_82571) {
   2796 		ifp->if_capabilities |=
   2797 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2798 	}
   2799 
   2800 	/*
    2801 	 * If we're an i82544 or greater (except i82547), we can do
   2802 	 * TCP segmentation offload.
   2803 	 */
   2804 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2805 		ifp->if_capabilities |= IFCAP_TSOv4;
   2806 	}
   2807 
   2808 	if (sc->sc_type >= WM_T_82571) {
   2809 		ifp->if_capabilities |= IFCAP_TSOv6;
   2810 	}
   2811 
   2812 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2813 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2814 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2815 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2816 
   2817 #ifdef WM_MPSAFE
   2818 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2819 #else
   2820 	sc->sc_core_lock = NULL;
   2821 #endif
   2822 
   2823 	/* Attach the interface. */
   2824 	error = if_initialize(ifp);
   2825 	if (error != 0) {
   2826 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2827 		    error);
   2828 		return; /* Error */
   2829 	}
   2830 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2831 	ether_ifattach(ifp, enaddr);
   2832 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2833 	if_register(ifp);
   2834 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2835 			  RND_FLAG_DEFAULT);
   2836 
   2837 #ifdef WM_EVENT_COUNTERS
   2838 	/* Attach event counters. */
   2839 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2840 	    NULL, xname, "linkintr");
   2841 
   2842 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2843 	    NULL, xname, "tx_xoff");
   2844 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2845 	    NULL, xname, "tx_xon");
   2846 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2847 	    NULL, xname, "rx_xoff");
   2848 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2849 	    NULL, xname, "rx_xon");
   2850 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2851 	    NULL, xname, "rx_macctl");
   2852 #endif /* WM_EVENT_COUNTERS */
   2853 
   2854 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2855 		pmf_class_network_register(self, ifp);
   2856 	else
   2857 		aprint_error_dev(self, "couldn't establish power handler\n");
   2858 
   2859 	sc->sc_flags |= WM_F_ATTACHED;
   2860  out:
   2861 	return;
   2862 }
   2863 
   2864 /* The detach function (ca_detach) */
   2865 static int
   2866 wm_detach(device_t self, int flags __unused)
   2867 {
   2868 	struct wm_softc *sc = device_private(self);
   2869 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2870 	int i;
   2871 
   2872 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2873 		return 0;
   2874 
   2875 	/* Stop the interface. Callouts are stopped in it. */
   2876 	wm_stop(ifp, 1);
   2877 
   2878 	pmf_device_deregister(self);
   2879 
   2880 #ifdef WM_EVENT_COUNTERS
   2881 	evcnt_detach(&sc->sc_ev_linkintr);
   2882 
   2883 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2884 	evcnt_detach(&sc->sc_ev_tx_xon);
   2885 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2886 	evcnt_detach(&sc->sc_ev_rx_xon);
   2887 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2888 #endif /* WM_EVENT_COUNTERS */
   2889 
   2890 	/* Tell the firmware about the release */
   2891 	WM_CORE_LOCK(sc);
   2892 	wm_release_manageability(sc);
   2893 	wm_release_hw_control(sc);
   2894 	wm_enable_wakeup(sc);
   2895 	WM_CORE_UNLOCK(sc);
   2896 
   2897 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2898 
   2899 	/* Delete all remaining media. */
   2900 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2901 
   2902 	ether_ifdetach(ifp);
   2903 	if_detach(ifp);
   2904 	if_percpuq_destroy(sc->sc_ipq);
   2905 
   2906 	/* Unload RX dmamaps and free mbufs */
   2907 	for (i = 0; i < sc->sc_nqueues; i++) {
   2908 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2909 		mutex_enter(rxq->rxq_lock);
   2910 		wm_rxdrain(rxq);
   2911 		mutex_exit(rxq->rxq_lock);
   2912 	}
   2913 	/* Must unlock here */
   2914 
   2915 	/* Disestablish the interrupt handler */
   2916 	for (i = 0; i < sc->sc_nintrs; i++) {
   2917 		if (sc->sc_ihs[i] != NULL) {
   2918 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2919 			sc->sc_ihs[i] = NULL;
   2920 		}
   2921 	}
   2922 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2923 
   2924 	wm_free_txrx_queues(sc);
   2925 
   2926 	/* Unmap the registers */
   2927 	if (sc->sc_ss) {
   2928 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2929 		sc->sc_ss = 0;
   2930 	}
   2931 	if (sc->sc_ios) {
   2932 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2933 		sc->sc_ios = 0;
   2934 	}
   2935 	if (sc->sc_flashs) {
   2936 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2937 		sc->sc_flashs = 0;
   2938 	}
   2939 
   2940 	if (sc->sc_core_lock)
   2941 		mutex_obj_free(sc->sc_core_lock);
   2942 	if (sc->sc_ich_phymtx)
   2943 		mutex_obj_free(sc->sc_ich_phymtx);
   2944 	if (sc->sc_ich_nvmmtx)
   2945 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2946 
   2947 	return 0;
   2948 }
   2949 
   2950 static bool
   2951 wm_suspend(device_t self, const pmf_qual_t *qual)
   2952 {
   2953 	struct wm_softc *sc = device_private(self);
   2954 
   2955 	wm_release_manageability(sc);
   2956 	wm_release_hw_control(sc);
   2957 	wm_enable_wakeup(sc);
   2958 
   2959 	return true;
   2960 }
   2961 
   2962 static bool
   2963 wm_resume(device_t self, const pmf_qual_t *qual)
   2964 {
   2965 	struct wm_softc *sc = device_private(self);
   2966 
   2967 	/* Disable ASPM L0s and/or L1 for workaround */
   2968 	wm_disable_aspm(sc);
   2969 	wm_init_manageability(sc);
   2970 
   2971 	return true;
   2972 }
   2973 
   2974 /*
   2975  * wm_watchdog:		[ifnet interface function]
   2976  *
   2977  *	Watchdog timer handler.
   2978  */
   2979 static void
   2980 wm_watchdog(struct ifnet *ifp)
   2981 {
   2982 	int qid;
   2983 	struct wm_softc *sc = ifp->if_softc;
    2984 	uint16_t hang_queue = 0; /* Max number of queues in wm(4) is 16 (82576). */
   2985 
   2986 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2987 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2988 
   2989 		wm_watchdog_txq(ifp, txq, &hang_queue);
   2990 	}
   2991 
   2992 	/*
    2993 	 * If any of the queues hung up, reset the interface.
   2994 	 */
   2995 	if (hang_queue != 0) {
   2996 		(void) wm_init(ifp);
   2997 
   2998 		/*
    2999 		 * There is still some upper layer processing which calls
    3000 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3001 		 */
   3002 		/* Try to get more packets going. */
   3003 		ifp->if_start(ifp);
   3004 	}
   3005 }
   3006 
   3007 
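         /*
          * Check one Tx queue for a watchdog timeout.  If the queue still has
          * packets pending after the completion sweep, the bit for its wmq_id
          * is set in the caller's *hang bitmap; e.g. a hang on queue 2 alone
          * yields 0x0004.
          */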
   3008 static void
   3009 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3010 {
   3011 
   3012 	mutex_enter(txq->txq_lock);
   3013 	if (txq->txq_sending &&
   3014 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3015 		wm_watchdog_txq_locked(ifp, txq, hang);
   3016 	}
   3017 	mutex_exit(txq->txq_lock);
   3018 }
   3019 
   3020 static void
   3021 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3022     uint16_t *hang)
   3023 {
   3024 	struct wm_softc *sc = ifp->if_softc;
   3025 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3026 
   3027 	KASSERT(mutex_owned(txq->txq_lock));
   3028 
   3029 	/*
   3030 	 * Since we're using delayed interrupts, sweep up
   3031 	 * before we report an error.
   3032 	 */
   3033 	wm_txeof(txq, UINT_MAX);
   3034 
   3035 	if (txq->txq_sending)
   3036 		*hang |= __BIT(wmq->wmq_id);
   3037 
   3038 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3039 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3040 		    device_xname(sc->sc_dev));
   3041 	} else {
   3042 #ifdef WM_DEBUG
   3043 		int i, j;
   3044 		struct wm_txsoft *txs;
   3045 #endif
   3046 		log(LOG_ERR,
   3047 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3048 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3049 		    txq->txq_next);
   3050 		ifp->if_oerrors++;
   3051 #ifdef WM_DEBUG
   3052 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   3053 		    i = WM_NEXTTXS(txq, i)) {
   3054 		    txs = &txq->txq_soft[i];
   3055 		    printf("txs %d tx %d -> %d\n",
   3056 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3057 		    for (j = txs->txs_firstdesc; ;
   3058 			j = WM_NEXTTX(txq, j)) {
   3059 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3060 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3061 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3062 				    printf("\t %#08x%08x\n",
   3063 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3064 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3065 			    } else {
   3066 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3067 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3068 					txq->txq_descs[j].wtx_addr.wa_low);
   3069 				    printf("\t %#04x%02x%02x%08x\n",
   3070 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3071 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3072 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3073 					txq->txq_descs[j].wtx_cmdlen);
   3074 			    }
   3075 			if (j == txs->txs_lastdesc)
   3076 				break;
   3077 			}
   3078 		}
   3079 #endif
   3080 	}
   3081 }
   3082 
   3083 /*
   3084  * wm_tick:
   3085  *
   3086  *	One second timer, used to check link status, sweep up
   3087  *	completed transmit jobs, etc.
   3088  */
   3089 static void
   3090 wm_tick(void *arg)
   3091 {
   3092 	struct wm_softc *sc = arg;
   3093 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3094 #ifndef WM_MPSAFE
   3095 	int s = splnet();
   3096 #endif
   3097 
   3098 	WM_CORE_LOCK(sc);
   3099 
   3100 	if (sc->sc_core_stopping) {
   3101 		WM_CORE_UNLOCK(sc);
   3102 #ifndef WM_MPSAFE
   3103 		splx(s);
   3104 #endif
   3105 		return;
   3106 	}
   3107 
   3108 	if (sc->sc_type >= WM_T_82542_2_1) {
   3109 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3110 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3111 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3112 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3113 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3114 	}
   3115 
   3116 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3117 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3118 	    + CSR_READ(sc, WMREG_CRCERRS)
   3119 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3120 	    + CSR_READ(sc, WMREG_SYMERRC)
   3121 	    + CSR_READ(sc, WMREG_RXERRC)
   3122 	    + CSR_READ(sc, WMREG_SEC)
   3123 	    + CSR_READ(sc, WMREG_CEXTERR)
   3124 	    + CSR_READ(sc, WMREG_RLEC);
   3125 	/*
    3126 	 * WMREG_RNBC is incremented when there are no available buffers in
    3127 	 * host memory. It does not count dropped packets, because the
    3128 	 * ethernet controller can still receive packets in that case as
    3129 	 * long as there is space in the PHY's FIFO.
    3130 	 *
    3131 	 * If you want to track WMREG_RNBC, you should use your own EVCNT
    3132 	 * instead of if_iqdrops.
   3133 	 */
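         	/*
         	 * A minimal sketch of such a counter (the "rx_nobuf" name and
         	 * the sc_ev_rx_nobuf field are hypothetical, mirroring the
         	 * WM_EVENT_COUNTERS attachments done in wm_attach()):
         	 *
         	 *	evcnt_attach_dynamic(&sc->sc_ev_rx_nobuf, EVCNT_TYPE_MISC,
         	 *	    NULL, xname, "rx_nobuf");
         	 *
         	 * and then, here in wm_tick():
         	 *
         	 *	WM_EVCNT_ADD(&sc->sc_ev_rx_nobuf, CSR_READ(sc, WMREG_RNBC));
         	 */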
   3134 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3135 
   3136 	if (sc->sc_flags & WM_F_HAS_MII)
   3137 		mii_tick(&sc->sc_mii);
   3138 	else if ((sc->sc_type >= WM_T_82575)
   3139 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3140 		wm_serdes_tick(sc);
   3141 	else
   3142 		wm_tbi_tick(sc);
   3143 
   3144 	WM_CORE_UNLOCK(sc);
   3145 
   3146 	wm_watchdog(ifp);
   3147 
   3148 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3149 }
   3150 
   3151 static int
   3152 wm_ifflags_cb(struct ethercom *ec)
   3153 {
   3154 	struct ifnet *ifp = &ec->ec_if;
   3155 	struct wm_softc *sc = ifp->if_softc;
   3156 	int rc = 0;
   3157 
   3158 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3159 		device_xname(sc->sc_dev), __func__));
   3160 
   3161 	WM_CORE_LOCK(sc);
   3162 
   3163 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3164 	sc->sc_if_flags = ifp->if_flags;
   3165 
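         	/*
         	 * Flag changes beyond the always-mutable bits can't be applied
         	 * on the fly; returning ENETRESET asks the caller to
         	 * reinitialize the interface.
         	 */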
   3166 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3167 		rc = ENETRESET;
   3168 		goto out;
   3169 	}
   3170 
   3171 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3172 		wm_set_filter(sc);
   3173 
   3174 	wm_set_vlan(sc);
   3175 
   3176 out:
   3177 	WM_CORE_UNLOCK(sc);
   3178 
   3179 	return rc;
   3180 }
   3181 
   3182 /*
   3183  * wm_ioctl:		[ifnet interface function]
   3184  *
   3185  *	Handle control requests from the operator.
   3186  */
   3187 static int
   3188 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3189 {
   3190 	struct wm_softc *sc = ifp->if_softc;
   3191 	struct ifreq *ifr = (struct ifreq *) data;
   3192 	struct ifaddr *ifa = (struct ifaddr *)data;
   3193 	struct sockaddr_dl *sdl;
   3194 	int s, error;
   3195 
   3196 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3197 		device_xname(sc->sc_dev), __func__));
   3198 
   3199 #ifndef WM_MPSAFE
   3200 	s = splnet();
   3201 #endif
   3202 	switch (cmd) {
   3203 	case SIOCSIFMEDIA:
   3204 	case SIOCGIFMEDIA:
   3205 		WM_CORE_LOCK(sc);
   3206 		/* Flow control requires full-duplex mode. */
   3207 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3208 		    (ifr->ifr_media & IFM_FDX) == 0)
   3209 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3210 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3211 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3212 				/* We can do both TXPAUSE and RXPAUSE. */
   3213 				ifr->ifr_media |=
   3214 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3215 			}
   3216 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3217 		}
   3218 		WM_CORE_UNLOCK(sc);
   3219 #ifdef WM_MPSAFE
   3220 		s = splnet();
   3221 #endif
   3222 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3223 #ifdef WM_MPSAFE
   3224 		splx(s);
   3225 #endif
   3226 		break;
   3227 	case SIOCINITIFADDR:
   3228 		WM_CORE_LOCK(sc);
   3229 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3230 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3231 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3232 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3233 			/* unicast address is first multicast entry */
   3234 			wm_set_filter(sc);
   3235 			error = 0;
   3236 			WM_CORE_UNLOCK(sc);
   3237 			break;
   3238 		}
   3239 		WM_CORE_UNLOCK(sc);
   3240 		/*FALLTHROUGH*/
   3241 	default:
   3242 #ifdef WM_MPSAFE
   3243 		s = splnet();
   3244 #endif
   3245 		/* It may call wm_start, so unlock here */
   3246 		error = ether_ioctl(ifp, cmd, data);
   3247 #ifdef WM_MPSAFE
   3248 		splx(s);
   3249 #endif
   3250 		if (error != ENETRESET)
   3251 			break;
   3252 
   3253 		error = 0;
   3254 
   3255 		if (cmd == SIOCSIFCAP) {
   3256 			error = (*ifp->if_init)(ifp);
   3257 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3258 			;
   3259 		else if (ifp->if_flags & IFF_RUNNING) {
   3260 			/*
   3261 			 * Multicast list has changed; set the hardware filter
   3262 			 * accordingly.
   3263 			 */
   3264 			WM_CORE_LOCK(sc);
   3265 			wm_set_filter(sc);
   3266 			WM_CORE_UNLOCK(sc);
   3267 		}
   3268 		break;
   3269 	}
   3270 
   3271 #ifndef WM_MPSAFE
   3272 	splx(s);
   3273 #endif
   3274 	return error;
   3275 }
   3276 
   3277 /* MAC address related */
   3278 
   3279 /*
    3280  * Get the offset of the MAC address and return it.
    3281  * If an error occurs, use offset 0.
   3282  */
   3283 static uint16_t
   3284 wm_check_alt_mac_addr(struct wm_softc *sc)
   3285 {
   3286 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3287 	uint16_t offset = NVM_OFF_MACADDR;
   3288 
   3289 	/* Try to read alternative MAC address pointer */
   3290 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3291 		return 0;
   3292 
    3293 	/* Check whether the pointer is valid. */
   3294 	if ((offset == 0x0000) || (offset == 0xffff))
   3295 		return 0;
   3296 
   3297 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3298 	/*
    3299 	 * Check whether the alternative MAC address is valid. Some cards
    3300 	 * have a non-0xffff pointer but don't actually use an alternative
    3301 	 * MAC address.
   3302 	 *
   3303 	 * Check whether the broadcast bit is set or not.
   3304 	 */
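         	/*
         	 * (myea[0] & 0xff) is the first byte of the MAC address; bit 0
         	 * of that byte is the IEEE group/multicast bit, which is never
         	 * set in a valid unicast station address.
         	 */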
   3305 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3306 		if (((myea[0] & 0xff) & 0x01) == 0)
   3307 			return offset; /* Found */
   3308 
   3309 	/* Not found */
   3310 	return 0;
   3311 }
   3312 
   3313 static int
   3314 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3315 {
   3316 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3317 	uint16_t offset = NVM_OFF_MACADDR;
   3318 	int do_invert = 0;
   3319 
   3320 	switch (sc->sc_type) {
   3321 	case WM_T_82580:
   3322 	case WM_T_I350:
   3323 	case WM_T_I354:
   3324 		/* EEPROM Top Level Partitioning */
   3325 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3326 		break;
   3327 	case WM_T_82571:
   3328 	case WM_T_82575:
   3329 	case WM_T_82576:
   3330 	case WM_T_80003:
   3331 	case WM_T_I210:
   3332 	case WM_T_I211:
   3333 		offset = wm_check_alt_mac_addr(sc);
   3334 		if (offset == 0)
   3335 			if ((sc->sc_funcid & 0x01) == 1)
   3336 				do_invert = 1;
   3337 		break;
   3338 	default:
   3339 		if ((sc->sc_funcid & 0x01) == 1)
   3340 			do_invert = 1;
   3341 		break;
   3342 	}
   3343 
   3344 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3345 		goto bad;
   3346 
   3347 	enaddr[0] = myea[0] & 0xff;
   3348 	enaddr[1] = myea[0] >> 8;
   3349 	enaddr[2] = myea[1] & 0xff;
   3350 	enaddr[3] = myea[1] >> 8;
   3351 	enaddr[4] = myea[2] & 0xff;
   3352 	enaddr[5] = myea[2] >> 8;
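         	/*
         	 * That is, NVM words are little-endian; e.g. myea[] =
         	 * { 0x1200, 0x5634, 0x9a78 } yields 00:12:34:56:78:9a.
         	 */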
   3353 
   3354 	/*
   3355 	 * Toggle the LSB of the MAC address on the second port
   3356 	 * of some dual port cards.
   3357 	 */
   3358 	if (do_invert != 0)
   3359 		enaddr[5] ^= 1;
   3360 
   3361 	return 0;
   3362 
   3363  bad:
   3364 	return -1;
   3365 }
   3366 
   3367 /*
   3368  * wm_set_ral:
   3369  *
    3370  *	Set an entry in the receive address list.
   3371  */
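         /*
          * The address is split across a register pair: RAL takes the first
          * four bytes in little-endian order and RAH takes the last two plus
          * the Address Valid bit, so e.g. 00:11:22:33:44:55 is programmed as
          * ral_lo = 0x33221100 and ral_hi = RAL_AV | 0x5544.
          */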
   3372 static void
   3373 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3374 {
   3375 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3376 	uint32_t wlock_mac;
   3377 	int rv;
   3378 
   3379 	if (enaddr != NULL) {
   3380 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3381 		    (enaddr[3] << 24);
   3382 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3383 		ral_hi |= RAL_AV;
   3384 	} else {
   3385 		ral_lo = 0;
   3386 		ral_hi = 0;
   3387 	}
   3388 
   3389 	switch (sc->sc_type) {
   3390 	case WM_T_82542_2_0:
   3391 	case WM_T_82542_2_1:
   3392 	case WM_T_82543:
   3393 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3394 		CSR_WRITE_FLUSH(sc);
   3395 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3396 		CSR_WRITE_FLUSH(sc);
   3397 		break;
   3398 	case WM_T_PCH2:
   3399 	case WM_T_PCH_LPT:
   3400 	case WM_T_PCH_SPT:
   3401 	case WM_T_PCH_CNP:
   3402 		if (idx == 0) {
   3403 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3404 			CSR_WRITE_FLUSH(sc);
   3405 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3406 			CSR_WRITE_FLUSH(sc);
   3407 			return;
   3408 		}
   3409 		if (sc->sc_type != WM_T_PCH2) {
   3410 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3411 			    FWSM_WLOCK_MAC);
   3412 			addrl = WMREG_SHRAL(idx - 1);
   3413 			addrh = WMREG_SHRAH(idx - 1);
   3414 		} else {
   3415 			wlock_mac = 0;
   3416 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3417 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3418 		}
   3419 
   3420 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3421 			rv = wm_get_swflag_ich8lan(sc);
   3422 			if (rv != 0)
   3423 				return;
   3424 			CSR_WRITE(sc, addrl, ral_lo);
   3425 			CSR_WRITE_FLUSH(sc);
   3426 			CSR_WRITE(sc, addrh, ral_hi);
   3427 			CSR_WRITE_FLUSH(sc);
   3428 			wm_put_swflag_ich8lan(sc);
   3429 		}
   3430 
   3431 		break;
   3432 	default:
   3433 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3434 		CSR_WRITE_FLUSH(sc);
   3435 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3436 		CSR_WRITE_FLUSH(sc);
   3437 		break;
   3438 	}
   3439 }
   3440 
   3441 /*
   3442  * wm_mchash:
   3443  *
   3444  *	Compute the hash of the multicast address for the 4096-bit
   3445  *	multicast filter.
   3446  */
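         /*
          * As a worked example: with sc_mchash_type 0 on a non-ICH/PCH chip,
          * the multicast address 01:00:5e:00:00:01 (enaddr[4] = 0x00,
          * enaddr[5] = 0x01) hashes to (0x00 >> 4) | (0x01 << 4) = 0x010, so
          * wm_set_filter() sets bit (0x010 & 0x1f) = 16 of MTA register
          * (0x010 >> 5) = 0.
          */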
   3447 static uint32_t
   3448 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3449 {
   3450 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3451 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3452 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3453 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3454 	uint32_t hash;
   3455 
   3456 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3457 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3458 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3459 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3460 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3461 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3462 		return (hash & 0x3ff);
   3463 	}
   3464 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3465 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3466 
   3467 	return (hash & 0xfff);
   3468 }
   3469 
   3470 /*
   3471  * wm_set_filter:
   3472  *
   3473  *	Set up the receive filter.
   3474  */
   3475 static void
   3476 wm_set_filter(struct wm_softc *sc)
   3477 {
   3478 	struct ethercom *ec = &sc->sc_ethercom;
   3479 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3480 	struct ether_multi *enm;
   3481 	struct ether_multistep step;
   3482 	bus_addr_t mta_reg;
   3483 	uint32_t hash, reg, bit;
   3484 	int i, size, ralmax;
   3485 
   3486 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3487 		device_xname(sc->sc_dev), __func__));
   3488 
   3489 	if (sc->sc_type >= WM_T_82544)
   3490 		mta_reg = WMREG_CORDOVA_MTA;
   3491 	else
   3492 		mta_reg = WMREG_MTA;
   3493 
   3494 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3495 
   3496 	if (ifp->if_flags & IFF_BROADCAST)
   3497 		sc->sc_rctl |= RCTL_BAM;
   3498 	if (ifp->if_flags & IFF_PROMISC) {
   3499 		sc->sc_rctl |= RCTL_UPE;
   3500 		goto allmulti;
   3501 	}
   3502 
   3503 	/*
   3504 	 * Set the station address in the first RAL slot, and
   3505 	 * clear the remaining slots.
   3506 	 */
   3507 	if (sc->sc_type == WM_T_ICH8)
    3508 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3509 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3510 	    || (sc->sc_type == WM_T_PCH))
   3511 		size = WM_RAL_TABSIZE_ICH8;
   3512 	else if (sc->sc_type == WM_T_PCH2)
   3513 		size = WM_RAL_TABSIZE_PCH2;
   3514 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3515 	    || (sc->sc_type == WM_T_PCH_CNP))
   3516 		size = WM_RAL_TABSIZE_PCH_LPT;
   3517 	else if (sc->sc_type == WM_T_82575)
   3518 		size = WM_RAL_TABSIZE_82575;
   3519 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3520 		size = WM_RAL_TABSIZE_82576;
   3521 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3522 		size = WM_RAL_TABSIZE_I350;
   3523 	else
   3524 		size = WM_RAL_TABSIZE;
   3525 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3526 
   3527 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3528 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3529 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3530 		switch (i) {
   3531 		case 0:
   3532 			/* We can use all entries */
   3533 			ralmax = size;
   3534 			break;
   3535 		case 1:
   3536 			/* Only RAR[0] */
   3537 			ralmax = 1;
   3538 			break;
   3539 		default:
   3540 			/* available SHRA + RAR[0] */
   3541 			ralmax = i + 1;
   3542 		}
   3543 	} else
   3544 		ralmax = size;
   3545 	for (i = 1; i < size; i++) {
   3546 		if (i < ralmax)
   3547 			wm_set_ral(sc, NULL, i);
   3548 	}
   3549 
   3550 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3551 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3552 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3553 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3554 		size = WM_ICH8_MC_TABSIZE;
   3555 	else
   3556 		size = WM_MC_TABSIZE;
   3557 	/* Clear out the multicast table. */
   3558 	for (i = 0; i < size; i++) {
   3559 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3560 		CSR_WRITE_FLUSH(sc);
   3561 	}
   3562 
   3563 	ETHER_LOCK(ec);
   3564 	ETHER_FIRST_MULTI(step, ec, enm);
   3565 	while (enm != NULL) {
   3566 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3567 			ETHER_UNLOCK(ec);
   3568 			/*
   3569 			 * We must listen to a range of multicast addresses.
   3570 			 * For now, just accept all multicasts, rather than
   3571 			 * trying to set only those filter bits needed to match
   3572 			 * the range.  (At this time, the only use of address
   3573 			 * ranges is for IP multicast routing, for which the
   3574 			 * range is big enough to require all bits set.)
   3575 			 */
   3576 			goto allmulti;
   3577 		}
   3578 
   3579 		hash = wm_mchash(sc, enm->enm_addrlo);
   3580 
   3581 		reg = (hash >> 5);
   3582 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3583 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3584 		    || (sc->sc_type == WM_T_PCH2)
   3585 		    || (sc->sc_type == WM_T_PCH_LPT)
   3586 		    || (sc->sc_type == WM_T_PCH_SPT)
   3587 		    || (sc->sc_type == WM_T_PCH_CNP))
   3588 			reg &= 0x1f;
   3589 		else
   3590 			reg &= 0x7f;
   3591 		bit = hash & 0x1f;
   3592 
   3593 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3594 		hash |= 1U << bit;
   3595 
   3596 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3597 			/*
    3598 			 * 82544 Errata 9: Certain registers cannot be written
   3599 			 * with particular alignments in PCI-X bus operation
   3600 			 * (FCAH, MTA and VFTA).
   3601 			 */
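         			/*
         			 * Work around it by reading the preceding
         			 * even-numbered MTA register and writing it back
         			 * after updating the target register, so the
         			 * aligned register pair stays consistent.
         			 */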
   3602 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3603 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3604 			CSR_WRITE_FLUSH(sc);
   3605 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3606 			CSR_WRITE_FLUSH(sc);
   3607 		} else {
   3608 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3609 			CSR_WRITE_FLUSH(sc);
   3610 		}
   3611 
   3612 		ETHER_NEXT_MULTI(step, enm);
   3613 	}
   3614 	ETHER_UNLOCK(ec);
   3615 
   3616 	ifp->if_flags &= ~IFF_ALLMULTI;
   3617 	goto setit;
   3618 
   3619  allmulti:
   3620 	ifp->if_flags |= IFF_ALLMULTI;
   3621 	sc->sc_rctl |= RCTL_MPE;
   3622 
   3623  setit:
   3624 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3625 }
   3626 
   3627 /* Reset and init related */
   3628 
   3629 static void
   3630 wm_set_vlan(struct wm_softc *sc)
   3631 {
   3632 
   3633 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3634 		device_xname(sc->sc_dev), __func__));
   3635 
   3636 	/* Deal with VLAN enables. */
   3637 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3638 		sc->sc_ctrl |= CTRL_VME;
   3639 	else
   3640 		sc->sc_ctrl &= ~CTRL_VME;
   3641 
   3642 	/* Write the control registers. */
   3643 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3644 }
   3645 
   3646 static void
   3647 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3648 {
   3649 	uint32_t gcr;
   3650 	pcireg_t ctrl2;
   3651 
   3652 	gcr = CSR_READ(sc, WMREG_GCR);
   3653 
   3654 	/* Only take action if timeout value is defaulted to 0 */
   3655 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3656 		goto out;
   3657 
   3658 	if ((gcr & GCR_CAP_VER2) == 0) {
   3659 		gcr |= GCR_CMPL_TMOUT_10MS;
   3660 		goto out;
   3661 	}
   3662 
   3663 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3664 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3665 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3666 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3667 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3668 
   3669 out:
   3670 	/* Disable completion timeout resend */
   3671 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3672 
   3673 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3674 }
   3675 
   3676 void
   3677 wm_get_auto_rd_done(struct wm_softc *sc)
   3678 {
   3679 	int i;
   3680 
    3681 	/* Wait for eeprom to reload */
   3682 	switch (sc->sc_type) {
   3683 	case WM_T_82571:
   3684 	case WM_T_82572:
   3685 	case WM_T_82573:
   3686 	case WM_T_82574:
   3687 	case WM_T_82583:
   3688 	case WM_T_82575:
   3689 	case WM_T_82576:
   3690 	case WM_T_82580:
   3691 	case WM_T_I350:
   3692 	case WM_T_I354:
   3693 	case WM_T_I210:
   3694 	case WM_T_I211:
   3695 	case WM_T_80003:
   3696 	case WM_T_ICH8:
   3697 	case WM_T_ICH9:
   3698 		for (i = 0; i < 10; i++) {
   3699 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3700 				break;
   3701 			delay(1000);
   3702 		}
   3703 		if (i == 10) {
   3704 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3705 			    "complete\n", device_xname(sc->sc_dev));
   3706 		}
   3707 		break;
   3708 	default:
   3709 		break;
   3710 	}
   3711 }
   3712 
   3713 void
   3714 wm_lan_init_done(struct wm_softc *sc)
   3715 {
   3716 	uint32_t reg = 0;
   3717 	int i;
   3718 
   3719 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3720 		device_xname(sc->sc_dev), __func__));
   3721 
   3722 	/* Wait for eeprom to reload */
   3723 	switch (sc->sc_type) {
   3724 	case WM_T_ICH10:
   3725 	case WM_T_PCH:
   3726 	case WM_T_PCH2:
   3727 	case WM_T_PCH_LPT:
   3728 	case WM_T_PCH_SPT:
   3729 	case WM_T_PCH_CNP:
   3730 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3731 			reg = CSR_READ(sc, WMREG_STATUS);
   3732 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3733 				break;
   3734 			delay(100);
   3735 		}
   3736 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3737 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3738 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3739 		}
   3740 		break;
   3741 	default:
   3742 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3743 		    __func__);
   3744 		break;
   3745 	}
   3746 
   3747 	reg &= ~STATUS_LAN_INIT_DONE;
   3748 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3749 }
   3750 
   3751 void
   3752 wm_get_cfg_done(struct wm_softc *sc)
   3753 {
   3754 	int mask;
   3755 	uint32_t reg;
   3756 	int i;
   3757 
   3758 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3759 		device_xname(sc->sc_dev), __func__));
   3760 
   3761 	/* Wait for eeprom to reload */
   3762 	switch (sc->sc_type) {
   3763 	case WM_T_82542_2_0:
   3764 	case WM_T_82542_2_1:
   3765 		/* null */
   3766 		break;
   3767 	case WM_T_82543:
   3768 	case WM_T_82544:
   3769 	case WM_T_82540:
   3770 	case WM_T_82545:
   3771 	case WM_T_82545_3:
   3772 	case WM_T_82546:
   3773 	case WM_T_82546_3:
   3774 	case WM_T_82541:
   3775 	case WM_T_82541_2:
   3776 	case WM_T_82547:
   3777 	case WM_T_82547_2:
   3778 	case WM_T_82573:
   3779 	case WM_T_82574:
   3780 	case WM_T_82583:
   3781 		/* generic */
   3782 		delay(10*1000);
   3783 		break;
   3784 	case WM_T_80003:
   3785 	case WM_T_82571:
   3786 	case WM_T_82572:
   3787 	case WM_T_82575:
   3788 	case WM_T_82576:
   3789 	case WM_T_82580:
   3790 	case WM_T_I350:
   3791 	case WM_T_I354:
   3792 	case WM_T_I210:
   3793 	case WM_T_I211:
   3794 		if (sc->sc_type == WM_T_82571) {
   3795 			/* Only 82571 shares port 0 */
   3796 			mask = EEMNGCTL_CFGDONE_0;
   3797 		} else
   3798 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3799 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3800 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3801 				break;
   3802 			delay(1000);
   3803 		}
   3804 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3805 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3806 				device_xname(sc->sc_dev), __func__));
   3807 		}
   3808 		break;
   3809 	case WM_T_ICH8:
   3810 	case WM_T_ICH9:
   3811 	case WM_T_ICH10:
   3812 	case WM_T_PCH:
   3813 	case WM_T_PCH2:
   3814 	case WM_T_PCH_LPT:
   3815 	case WM_T_PCH_SPT:
   3816 	case WM_T_PCH_CNP:
   3817 		delay(10*1000);
   3818 		if (sc->sc_type >= WM_T_ICH10)
   3819 			wm_lan_init_done(sc);
   3820 		else
   3821 			wm_get_auto_rd_done(sc);
   3822 
   3823 		reg = CSR_READ(sc, WMREG_STATUS);
   3824 		if ((reg & STATUS_PHYRA) != 0)
   3825 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3826 		break;
   3827 	default:
   3828 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3829 		    __func__);
   3830 		break;
   3831 	}
   3832 }
   3833 
   3834 void
   3835 wm_phy_post_reset(struct wm_softc *sc)
   3836 {
   3837 	uint32_t reg;
   3838 
   3839 	/* This function is only for ICH8 and newer. */
   3840 	if (sc->sc_type < WM_T_ICH8)
   3841 		return;
   3842 
   3843 	if (wm_phy_resetisblocked(sc)) {
   3844 		/* XXX */
   3845 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3846 		return;
   3847 	}
   3848 
   3849 	/* Allow time for h/w to get to quiescent state after reset */
   3850 	delay(10*1000);
   3851 
   3852 	/* Perform any necessary post-reset workarounds */
   3853 	if (sc->sc_type == WM_T_PCH)
   3854 		wm_hv_phy_workaround_ich8lan(sc);
   3855 	if (sc->sc_type == WM_T_PCH2)
   3856 		wm_lv_phy_workaround_ich8lan(sc);
   3857 
   3858 	/* Clear the host wakeup bit after lcd reset */
   3859 	if (sc->sc_type >= WM_T_PCH) {
   3860 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3861 		    BM_PORT_GEN_CFG);
   3862 		reg &= ~BM_WUC_HOST_WU_BIT;
   3863 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3864 		    BM_PORT_GEN_CFG, reg);
   3865 	}
   3866 
   3867 	/* Configure the LCD with the extended configuration region in NVM */
   3868 	wm_init_lcd_from_nvm(sc);
   3869 
   3870 	/* Configure the LCD with the OEM bits in NVM */
   3871 }
   3872 
   3873 /* Only for PCH and newer */
   3874 static void
   3875 wm_write_smbus_addr(struct wm_softc *sc)
   3876 {
   3877 	uint32_t strap, freq;
   3878 	uint32_t phy_data;
   3879 
   3880 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3881 		device_xname(sc->sc_dev), __func__));
   3882 
   3883 	strap = CSR_READ(sc, WMREG_STRAP);
   3884 	freq = __SHIFTOUT(strap, STRAP_FREQ);
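         	/*
         	 * freq is a 2-bit strap value; "if (freq--)" below both tests
         	 * for the unsupported value 0 and rebases 1..3 to 0..2, so that
         	 * bits 0 and 1 select the FREQ_LOW and FREQ_HIGH PHY bits.
         	 */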
   3885 
   3886 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3887 
   3888 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3889 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3890 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3891 
   3892 	if (sc->sc_phytype == WMPHY_I217) {
   3893 		/* Restore SMBus frequency */
    3894 		if (freq--) {
   3895 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3896 			    | HV_SMB_ADDR_FREQ_HIGH);
   3897 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3898 			    HV_SMB_ADDR_FREQ_LOW);
   3899 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3900 			    HV_SMB_ADDR_FREQ_HIGH);
   3901 		} else {
   3902 			DPRINTF(WM_DEBUG_INIT,
   3903 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3904 				device_xname(sc->sc_dev), __func__));
   3905 		}
   3906 	}
   3907 
   3908 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3909 }
   3910 
   3911 void
   3912 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3913 {
   3914 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3915 	uint16_t phy_page = 0;
   3916 
   3917 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3918 		device_xname(sc->sc_dev), __func__));
   3919 
   3920 	switch (sc->sc_type) {
   3921 	case WM_T_ICH8:
   3922 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3923 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3924 			return;
   3925 
   3926 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3927 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3928 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3929 			break;
   3930 		}
   3931 		/* FALLTHROUGH */
   3932 	case WM_T_PCH:
   3933 	case WM_T_PCH2:
   3934 	case WM_T_PCH_LPT:
   3935 	case WM_T_PCH_SPT:
   3936 	case WM_T_PCH_CNP:
   3937 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3938 		break;
   3939 	default:
   3940 		return;
   3941 	}
   3942 
   3943 	sc->phy.acquire(sc);
   3944 
   3945 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3946 	if ((reg & sw_cfg_mask) == 0)
   3947 		goto release;
   3948 
   3949 	/*
   3950 	 * Make sure HW does not configure LCD from PHY extended configuration
   3951 	 * before SW configuration
   3952 	 */
   3953 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3954 	if ((sc->sc_type < WM_T_PCH2)
   3955 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3956 		goto release;
   3957 
   3958 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3959 		device_xname(sc->sc_dev), __func__));
   3960 	/* word_addr is in DWORD */
   3961 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3962 
   3963 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3964 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3965 	if (cnf_size == 0)
   3966 		goto release;
   3967 
   3968 	if (((sc->sc_type == WM_T_PCH)
   3969 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3970 	    || (sc->sc_type > WM_T_PCH)) {
   3971 		/*
   3972 		 * HW configures the SMBus address and LEDs when the OEM and
   3973 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3974 		 * are cleared, SW will configure them instead.
   3975 		 */
   3976 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3977 			device_xname(sc->sc_dev), __func__));
   3978 		wm_write_smbus_addr(sc);
   3979 
   3980 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3981 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3982 	}
   3983 
   3984 	/* Configure LCD from extended configuration region. */
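         	/* Each entry is a (data, address) word pair at word_addr + i * 2. */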
   3985 	for (i = 0; i < cnf_size; i++) {
   3986 		uint16_t reg_data, reg_addr;
   3987 
   3988 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3989 			goto release;
   3990 
   3991 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3992 			goto release;
   3993 
   3994 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3995 			phy_page = reg_data;
   3996 
   3997 		reg_addr &= IGPHY_MAXREGADDR;
   3998 		reg_addr |= phy_page;
   3999 
   4000 		sc->phy.release(sc); /* XXX */
   4001 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   4002 		sc->phy.acquire(sc); /* XXX */
   4003 	}
   4004 
   4005 release:
   4006 	sc->phy.release(sc);
   4007 	return;
   4008 }
   4009 
   4010 
   4011 /* Init hardware bits */
   4012 void
   4013 wm_initialize_hardware_bits(struct wm_softc *sc)
   4014 {
   4015 	uint32_t tarc0, tarc1, reg;
   4016 
   4017 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4018 		device_xname(sc->sc_dev), __func__));
   4019 
   4020 	/* For 82571 variant, 80003 and ICHs */
   4021 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4022 	    || (sc->sc_type >= WM_T_80003)) {
   4023 
   4024 		/* Transmit Descriptor Control 0 */
   4025 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4026 		reg |= TXDCTL_COUNT_DESC;
   4027 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4028 
   4029 		/* Transmit Descriptor Control 1 */
   4030 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4031 		reg |= TXDCTL_COUNT_DESC;
   4032 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4033 
   4034 		/* TARC0 */
   4035 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4036 		switch (sc->sc_type) {
   4037 		case WM_T_82571:
   4038 		case WM_T_82572:
   4039 		case WM_T_82573:
   4040 		case WM_T_82574:
   4041 		case WM_T_82583:
   4042 		case WM_T_80003:
   4043 			/* Clear bits 30..27 */
   4044 			tarc0 &= ~__BITS(30, 27);
   4045 			break;
   4046 		default:
   4047 			break;
   4048 		}
   4049 
   4050 		switch (sc->sc_type) {
   4051 		case WM_T_82571:
   4052 		case WM_T_82572:
   4053 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4054 
   4055 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4056 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4057 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4058 			/* 8257[12] Errata No.7 */
   4059 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   4060 
   4061 			/* TARC1 bit 28 */
   4062 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4063 				tarc1 &= ~__BIT(28);
   4064 			else
   4065 				tarc1 |= __BIT(28);
   4066 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4067 
   4068 			/*
   4069 			 * 8257[12] Errata No.13
    4070 			 * Disable Dynamic Clock Gating.
   4071 			 */
   4072 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4073 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4074 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4075 			break;
   4076 		case WM_T_82573:
   4077 		case WM_T_82574:
   4078 		case WM_T_82583:
   4079 			if ((sc->sc_type == WM_T_82574)
   4080 			    || (sc->sc_type == WM_T_82583))
   4081 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4082 
   4083 			/* Extended Device Control */
   4084 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4085 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4086 			reg |= __BIT(22);	/* Set bit 22 */
   4087 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4088 
   4089 			/* Device Control */
   4090 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4091 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4092 
   4093 			/* PCIe Control Register */
   4094 			/*
   4095 			 * 82573 Errata (unknown).
   4096 			 *
   4097 			 * 82574 Errata 25 and 82583 Errata 12
   4098 			 * "Dropped Rx Packets":
    4099 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4100 			 */
   4101 			reg = CSR_READ(sc, WMREG_GCR);
   4102 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4103 			CSR_WRITE(sc, WMREG_GCR, reg);
   4104 
   4105 			if ((sc->sc_type == WM_T_82574)
   4106 			    || (sc->sc_type == WM_T_82583)) {
   4107 				/*
   4108 				 * Document says this bit must be set for
   4109 				 * proper operation.
   4110 				 */
   4111 				reg = CSR_READ(sc, WMREG_GCR);
   4112 				reg |= __BIT(22);
   4113 				CSR_WRITE(sc, WMREG_GCR, reg);
   4114 
   4115 				/*
    4116 				 * Apply a workaround for a hardware erratum
    4117 				 * documented in the errata docs. It fixes an
    4118 				 * issue where some error-prone or unreliable
    4119 				 * PCIe completions occur, particularly with
    4120 				 * ASPM enabled. Without the fix, the issue
    4121 				 * can cause Tx timeouts.
   4122 				 */
   4123 				reg = CSR_READ(sc, WMREG_GCR2);
   4124 				reg |= __BIT(0);
   4125 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4126 			}
   4127 			break;
   4128 		case WM_T_80003:
   4129 			/* TARC0 */
   4130 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4131 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4132 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4133 
   4134 			/* TARC1 bit 28 */
   4135 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4136 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4137 				tarc1 &= ~__BIT(28);
   4138 			else
   4139 				tarc1 |= __BIT(28);
   4140 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4141 			break;
   4142 		case WM_T_ICH8:
   4143 		case WM_T_ICH9:
   4144 		case WM_T_ICH10:
   4145 		case WM_T_PCH:
   4146 		case WM_T_PCH2:
   4147 		case WM_T_PCH_LPT:
   4148 		case WM_T_PCH_SPT:
   4149 		case WM_T_PCH_CNP:
   4150 			/* TARC0 */
   4151 			if (sc->sc_type == WM_T_ICH8) {
   4152 				/* Set TARC0 bits 29 and 28 */
   4153 				tarc0 |= __BITS(29, 28);
   4154 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4155 				tarc0 |= __BIT(29);
   4156 				/*
    4157 				 * Drop bit 28. From Linux.
   4158 				 * See I218/I219 spec update
   4159 				 * "5. Buffer Overrun While the I219 is
   4160 				 * Processing DMA Transactions"
   4161 				 */
   4162 				tarc0 &= ~__BIT(28);
   4163 			}
   4164 			/* Set TARC0 bits 23,24,26,27 */
   4165 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4166 
   4167 			/* CTRL_EXT */
   4168 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4169 			reg |= __BIT(22);	/* Set bit 22 */
   4170 			/*
   4171 			 * Enable PHY low-power state when MAC is at D3
   4172 			 * w/o WoL
   4173 			 */
   4174 			if (sc->sc_type >= WM_T_PCH)
   4175 				reg |= CTRL_EXT_PHYPDEN;
   4176 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4177 
   4178 			/* TARC1 */
   4179 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4180 			/* bit 28 */
   4181 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4182 				tarc1 &= ~__BIT(28);
   4183 			else
   4184 				tarc1 |= __BIT(28);
   4185 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4186 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4187 
   4188 			/* Device Status */
   4189 			if (sc->sc_type == WM_T_ICH8) {
   4190 				reg = CSR_READ(sc, WMREG_STATUS);
   4191 				reg &= ~__BIT(31);
   4192 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4193 
   4194 			}
   4195 
   4196 			/* IOSFPC */
   4197 			if (sc->sc_type == WM_T_PCH_SPT) {
   4198 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4199 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4200 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4201 			}
   4202 			/*
    4203 			 * To work around a descriptor data corruption issue
    4204 			 * with NFS v2 UDP traffic, just disable the NFS
    4205 			 * filtering capability.
   4206 			 */
   4207 			reg = CSR_READ(sc, WMREG_RFCTL);
   4208 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4209 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4210 			break;
   4211 		default:
   4212 			break;
   4213 		}
   4214 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4215 
   4216 		switch (sc->sc_type) {
   4217 		/*
   4218 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4219 		 * Avoid RSS Hash Value bug.
   4220 		 */
   4221 		case WM_T_82571:
   4222 		case WM_T_82572:
   4223 		case WM_T_82573:
   4224 		case WM_T_80003:
   4225 		case WM_T_ICH8:
   4226 			reg = CSR_READ(sc, WMREG_RFCTL);
   4227 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4228 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4229 			break;
   4230 		case WM_T_82574:
    4231 			/* Use extended Rx descriptors. */
   4232 			reg = CSR_READ(sc, WMREG_RFCTL);
   4233 			reg |= WMREG_RFCTL_EXSTEN;
   4234 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4235 			break;
   4236 		default:
   4237 			break;
   4238 		}
   4239 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4240 		/*
   4241 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4242 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4243 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4244 		 * Correctly by the Device"
   4245 		 *
   4246 		 * I354(C2000) Errata AVR53:
   4247 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4248 		 * Hang"
   4249 		 */
   4250 		reg = CSR_READ(sc, WMREG_RFCTL);
   4251 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4252 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4253 	}
   4254 }
   4255 
   4256 static uint32_t
   4257 wm_rxpbs_adjust_82580(uint32_t val)
   4258 {
   4259 	uint32_t rv = 0;
   4260 
   4261 	if (val < __arraycount(wm_82580_rxpbs_table))
   4262 		rv = wm_82580_rxpbs_table[val];
   4263 
   4264 	return rv;
   4265 }
   4266 
   4267 /*
   4268  * wm_reset_phy:
   4269  *
   4270  *	generic PHY reset function.
   4271  *	Same as e1000_phy_hw_reset_generic()
   4272  */
   4273 static void
   4274 wm_reset_phy(struct wm_softc *sc)
   4275 {
   4276 	uint32_t reg;
   4277 
   4278 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4279 		device_xname(sc->sc_dev), __func__));
   4280 	if (wm_phy_resetisblocked(sc))
   4281 		return;
   4282 
   4283 	sc->phy.acquire(sc);
   4284 
   4285 	reg = CSR_READ(sc, WMREG_CTRL);
   4286 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4287 	CSR_WRITE_FLUSH(sc);
   4288 
   4289 	delay(sc->phy.reset_delay_us);
   4290 
   4291 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4292 	CSR_WRITE_FLUSH(sc);
   4293 
   4294 	delay(150);
   4295 
   4296 	sc->phy.release(sc);
   4297 
   4298 	wm_get_cfg_done(sc);
   4299 	wm_phy_post_reset(sc);
   4300 }
   4301 
   4302 /*
    4303  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4304  * so it is enough to check only sc->sc_queue[0].
   4305  */
   4306 static void
   4307 wm_flush_desc_rings(struct wm_softc *sc)
   4308 {
   4309 	pcireg_t preg;
   4310 	uint32_t reg;
   4311 	struct wm_txqueue *txq;
   4312 	wiseman_txdesc_t *txd;
   4313 	int nexttx;
   4314 	uint32_t rctl;
   4315 
   4316 	/* First, disable MULR fix in FEXTNVM11 */
   4317 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4318 	reg |= FEXTNVM11_DIS_MULRFIX;
   4319 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4320 
   4321 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4322 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4323 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4324 		return;
   4325 
   4326 	/* TX */
   4327 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4328 	    device_xname(sc->sc_dev), preg, reg);
   4329 	reg = CSR_READ(sc, WMREG_TCTL);
   4330 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4331 
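         	/*
         	 * Queue a single dummy 512-byte descriptor (only IFCS set in
         	 * the command field) and advance the tail pointer, which lets
         	 * the hardware complete the requested descriptor flush.
         	 */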
   4332 	txq = &sc->sc_queue[0].wmq_txq;
   4333 	nexttx = txq->txq_next;
   4334 	txd = &txq->txq_descs[nexttx];
   4335 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4336 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4337 	txd->wtx_fields.wtxu_status = 0;
   4338 	txd->wtx_fields.wtxu_options = 0;
   4339 	txd->wtx_fields.wtxu_vlan = 0;
   4340 
   4341 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4342 	    BUS_SPACE_BARRIER_WRITE);
   4343 
   4344 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4345 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4346 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4347 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4348 	delay(250);
   4349 
   4350 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4351 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4352 		return;
   4353 
   4354 	/* RX */
   4355 	printf("%s: Need RX flush (reg = %08x)\n",
   4356 	    device_xname(sc->sc_dev), preg);
   4357 	rctl = CSR_READ(sc, WMREG_RCTL);
   4358 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4359 	CSR_WRITE_FLUSH(sc);
   4360 	delay(150);
   4361 
   4362 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4363 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4364 	reg &= 0xffffc000;
   4365 	/*
   4366 	 * update thresholds: prefetch threshold to 31, host threshold
   4367 	 * to 1 and make sure the granularity is "descriptors" and not
   4368 	 * "cache lines"
   4369 	 */
   4370 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4371 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4372 
   4373 	/*
   4374 	 * momentarily enable the RX ring for the changes to take
   4375 	 * effect
   4376 	 */
   4377 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4378 	CSR_WRITE_FLUSH(sc);
   4379 	delay(150);
   4380 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4381 }
   4382 
   4383 /*
   4384  * wm_reset:
   4385  *
   4386  *	Reset the i82542 chip.
   4387  */
   4388 static void
   4389 wm_reset(struct wm_softc *sc)
   4390 {
   4391 	int phy_reset = 0;
   4392 	int i, error = 0;
   4393 	uint32_t reg;
   4394 	uint16_t kmreg;
   4395 	int rv;
   4396 
   4397 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4398 		device_xname(sc->sc_dev), __func__));
   4399 	KASSERT(sc->sc_type != 0);
   4400 
   4401 	/*
   4402 	 * Allocate on-chip memory according to the MTU size.
   4403 	 * The Packet Buffer Allocation register must be written
   4404 	 * before the chip is reset.
   4405 	 */
   4406 	switch (sc->sc_type) {
   4407 	case WM_T_82547:
   4408 	case WM_T_82547_2:
   4409 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4410 		    PBA_22K : PBA_30K;
   4411 		for (i = 0; i < sc->sc_nqueues; i++) {
   4412 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4413 			txq->txq_fifo_head = 0;
   4414 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4415 			txq->txq_fifo_size =
   4416 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4417 			txq->txq_fifo_stall = 0;
   4418 		}
   4419 		break;
   4420 	case WM_T_82571:
   4421 	case WM_T_82572:
    4422 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4423 	case WM_T_80003:
   4424 		sc->sc_pba = PBA_32K;
   4425 		break;
   4426 	case WM_T_82573:
   4427 		sc->sc_pba = PBA_12K;
   4428 		break;
   4429 	case WM_T_82574:
   4430 	case WM_T_82583:
   4431 		sc->sc_pba = PBA_20K;
   4432 		break;
   4433 	case WM_T_82576:
   4434 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4435 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4436 		break;
   4437 	case WM_T_82580:
   4438 	case WM_T_I350:
   4439 	case WM_T_I354:
   4440 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4441 		break;
   4442 	case WM_T_I210:
   4443 	case WM_T_I211:
   4444 		sc->sc_pba = PBA_34K;
   4445 		break;
   4446 	case WM_T_ICH8:
   4447 		/* Workaround for a bit corruption issue in FIFO memory */
   4448 		sc->sc_pba = PBA_8K;
   4449 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4450 		break;
   4451 	case WM_T_ICH9:
   4452 	case WM_T_ICH10:
   4453 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4454 		    PBA_14K : PBA_10K;
   4455 		break;
   4456 	case WM_T_PCH:
   4457 	case WM_T_PCH2:	/* XXX 14K? */
   4458 	case WM_T_PCH_LPT:
   4459 	case WM_T_PCH_SPT:
   4460 	case WM_T_PCH_CNP:
   4461 		sc->sc_pba = PBA_26K;
   4462 		break;
   4463 	default:
   4464 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4465 		    PBA_40K : PBA_48K;
   4466 		break;
   4467 	}
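         	/*
         	 * On the 82547 variants the value chosen above also carves up
         	 * the on-chip memory: the low sc_pba kilobytes form the packet
         	 * buffer and the remainder (PBA_40K - sc_pba, converted to
         	 * bytes by PBA_BYTE_SHIFT) becomes the Tx FIFO tracked by the
         	 * txq_fifo_* fields.
         	 */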
   4468 	/*
   4469 	 * Only old or non-multiqueue devices have the PBA register
   4470 	 * XXX Need special handling for 82575.
   4471 	 */
   4472 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4473 	    || (sc->sc_type == WM_T_82575))
   4474 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4475 
   4476 	/* Prevent the PCI-E bus from sticking */
   4477 	if (sc->sc_flags & WM_F_PCIE) {
   4478 		int timeout = 800;
   4479 
   4480 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4481 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4482 
   4483 		while (timeout--) {
   4484 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4485 			    == 0)
   4486 				break;
   4487 			delay(100);
   4488 		}
   4489 		if (timeout == 0)
   4490 			device_printf(sc->sc_dev,
   4491 			    "failed to disable busmastering\n");
   4492 	}
   4493 
   4494 	/* Set the completion timeout for interface */
   4495 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4496 	    || (sc->sc_type == WM_T_82580)
   4497 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4498 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4499 		wm_set_pcie_completion_timeout(sc);
   4500 
   4501 	/* Clear interrupt */
   4502 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4503 	if (wm_is_using_msix(sc)) {
   4504 		if (sc->sc_type != WM_T_82574) {
   4505 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4506 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4507 		} else {
   4508 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4509 		}
   4510 	}
   4511 
   4512 	/* Stop the transmit and receive processes. */
   4513 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4514 	sc->sc_rctl &= ~RCTL_EN;
   4515 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4516 	CSR_WRITE_FLUSH(sc);
   4517 
   4518 	/* XXX set_tbi_sbp_82543() */
   4519 
   4520 	delay(10*1000);
   4521 
   4522 	/* Must acquire the MDIO ownership before MAC reset */
   4523 	switch (sc->sc_type) {
   4524 	case WM_T_82573:
   4525 	case WM_T_82574:
   4526 	case WM_T_82583:
   4527 		error = wm_get_hw_semaphore_82573(sc);
   4528 		break;
   4529 	default:
   4530 		break;
   4531 	}
   4532 
   4533 	/*
   4534 	 * 82541 Errata 29? & 82547 Errata 28?
    4535 	 * See also the description of the PHY_RST bit in the CTRL register
   4536 	 * in 8254x_GBe_SDM.pdf.
   4537 	 */
   4538 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4539 		CSR_WRITE(sc, WMREG_CTRL,
   4540 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4541 		CSR_WRITE_FLUSH(sc);
   4542 		delay(5000);
   4543 	}
   4544 
   4545 	switch (sc->sc_type) {
   4546 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4547 	case WM_T_82541:
   4548 	case WM_T_82541_2:
   4549 	case WM_T_82547:
   4550 	case WM_T_82547_2:
   4551 		/*
   4552 		 * On some chipsets, a reset through a memory-mapped write
   4553 		 * cycle can cause the chip to reset before completing the
    4554 		 * write cycle.  This causes a major headache that can be
   4555 		 * avoided by issuing the reset via indirect register writes
   4556 		 * through I/O space.
   4557 		 *
   4558 		 * So, if we successfully mapped the I/O BAR at attach time,
   4559 		 * use that.  Otherwise, try our luck with a memory-mapped
   4560 		 * reset.
   4561 		 */
   4562 		if (sc->sc_flags & WM_F_IOH_VALID)
   4563 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4564 		else
   4565 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4566 		break;
   4567 	case WM_T_82545_3:
   4568 	case WM_T_82546_3:
   4569 		/* Use the shadow control register on these chips. */
   4570 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4571 		break;
   4572 	case WM_T_80003:
   4573 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4574 		sc->phy.acquire(sc);
   4575 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4576 		sc->phy.release(sc);
   4577 		break;
   4578 	case WM_T_ICH8:
   4579 	case WM_T_ICH9:
   4580 	case WM_T_ICH10:
   4581 	case WM_T_PCH:
   4582 	case WM_T_PCH2:
   4583 	case WM_T_PCH_LPT:
   4584 	case WM_T_PCH_SPT:
   4585 	case WM_T_PCH_CNP:
   4586 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4587 		if (wm_phy_resetisblocked(sc) == false) {
   4588 			/*
   4589 			 * Gate automatic PHY configuration by hardware on
   4590 			 * non-managed 82579
   4591 			 */
   4592 			if ((sc->sc_type == WM_T_PCH2)
   4593 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4594 				== 0))
   4595 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4596 
   4597 			reg |= CTRL_PHY_RESET;
   4598 			phy_reset = 1;
   4599 		} else
   4600 			printf("XXX reset is blocked!!!\n");
   4601 		sc->phy.acquire(sc);
   4602 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4603 		/* Don't insert a completion barrier during reset */
   4604 		delay(20*1000);
   4605 		mutex_exit(sc->sc_ich_phymtx);
   4606 		break;
   4607 	case WM_T_82580:
   4608 	case WM_T_I350:
   4609 	case WM_T_I354:
   4610 	case WM_T_I210:
   4611 	case WM_T_I211:
   4612 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4613 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4614 			CSR_WRITE_FLUSH(sc);
   4615 		delay(5000);
   4616 		break;
   4617 	case WM_T_82542_2_0:
   4618 	case WM_T_82542_2_1:
   4619 	case WM_T_82543:
   4620 	case WM_T_82540:
   4621 	case WM_T_82545:
   4622 	case WM_T_82546:
   4623 	case WM_T_82571:
   4624 	case WM_T_82572:
   4625 	case WM_T_82573:
   4626 	case WM_T_82574:
   4627 	case WM_T_82575:
   4628 	case WM_T_82576:
   4629 	case WM_T_82583:
   4630 	default:
   4631 		/* Everything else can safely use the documented method. */
   4632 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4633 		break;
   4634 	}
   4635 
   4636 	/* Must release the MDIO ownership after MAC reset */
   4637 	switch (sc->sc_type) {
   4638 	case WM_T_82573:
   4639 	case WM_T_82574:
   4640 	case WM_T_82583:
   4641 		if (error == 0)
   4642 			wm_put_hw_semaphore_82573(sc);
   4643 		break;
   4644 	default:
   4645 		break;
   4646 	}
   4647 
   4648 	if (phy_reset != 0)
   4649 		wm_get_cfg_done(sc);
   4650 
   4651 	/* reload EEPROM */
   4652 	switch (sc->sc_type) {
   4653 	case WM_T_82542_2_0:
   4654 	case WM_T_82542_2_1:
   4655 	case WM_T_82543:
   4656 	case WM_T_82544:
   4657 		delay(10);
   4658 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4659 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4660 		CSR_WRITE_FLUSH(sc);
   4661 		delay(2000);
   4662 		break;
   4663 	case WM_T_82540:
   4664 	case WM_T_82545:
   4665 	case WM_T_82545_3:
   4666 	case WM_T_82546:
   4667 	case WM_T_82546_3:
   4668 		delay(5*1000);
   4669 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4670 		break;
   4671 	case WM_T_82541:
   4672 	case WM_T_82541_2:
   4673 	case WM_T_82547:
   4674 	case WM_T_82547_2:
   4675 		delay(20000);
   4676 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4677 		break;
   4678 	case WM_T_82571:
   4679 	case WM_T_82572:
   4680 	case WM_T_82573:
   4681 	case WM_T_82574:
   4682 	case WM_T_82583:
   4683 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4684 			delay(10);
   4685 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4686 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4687 			CSR_WRITE_FLUSH(sc);
   4688 		}
   4689 		/* check EECD_EE_AUTORD */
   4690 		wm_get_auto_rd_done(sc);
   4691 		/*
    4692 		 * PHY configuration from the NVM starts just after
    4693 		 * EECD_AUTO_RD is set.
   4694 		 */
   4695 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4696 		    || (sc->sc_type == WM_T_82583))
   4697 			delay(25*1000);
   4698 		break;
   4699 	case WM_T_82575:
   4700 	case WM_T_82576:
   4701 	case WM_T_82580:
   4702 	case WM_T_I350:
   4703 	case WM_T_I354:
   4704 	case WM_T_I210:
   4705 	case WM_T_I211:
   4706 	case WM_T_80003:
   4707 		/* check EECD_EE_AUTORD */
   4708 		wm_get_auto_rd_done(sc);
   4709 		break;
   4710 	case WM_T_ICH8:
   4711 	case WM_T_ICH9:
   4712 	case WM_T_ICH10:
   4713 	case WM_T_PCH:
   4714 	case WM_T_PCH2:
   4715 	case WM_T_PCH_LPT:
   4716 	case WM_T_PCH_SPT:
   4717 	case WM_T_PCH_CNP:
   4718 		break;
   4719 	default:
   4720 		panic("%s: unknown type\n", __func__);
   4721 	}
   4722 
   4723 	/* Check whether EEPROM is present or not */
   4724 	switch (sc->sc_type) {
   4725 	case WM_T_82575:
   4726 	case WM_T_82576:
   4727 	case WM_T_82580:
   4728 	case WM_T_I350:
   4729 	case WM_T_I354:
   4730 	case WM_T_ICH8:
   4731 	case WM_T_ICH9:
   4732 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4733 			/* Not found */
   4734 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4735 			if (sc->sc_type == WM_T_82575)
   4736 				wm_reset_init_script_82575(sc);
   4737 		}
   4738 		break;
   4739 	default:
   4740 		break;
   4741 	}
   4742 
   4743 	if (phy_reset != 0)
   4744 		wm_phy_post_reset(sc);
   4745 
   4746 	if ((sc->sc_type == WM_T_82580)
   4747 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4748 		/* clear global device reset status bit */
   4749 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4750 	}
   4751 
   4752 	/* Clear any pending interrupt events. */
   4753 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4754 	reg = CSR_READ(sc, WMREG_ICR);
   4755 	if (wm_is_using_msix(sc)) {
   4756 		if (sc->sc_type != WM_T_82574) {
   4757 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4758 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4759 		} else
   4760 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4761 	}
   4762 
   4763 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4764 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4765 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4766 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4767 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4768 		reg |= KABGTXD_BGSQLBIAS;
   4769 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4770 	}
   4771 
   4772 	/* reload sc_ctrl */
   4773 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4774 
   4775 	if (sc->sc_type == WM_T_I354) {
   4776 #if 0
   4777 		/* I354 uses an external PHY */
   4778 		wm_set_eee_i354(sc);
   4779 #endif
   4780 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4781 		wm_set_eee_i350(sc);
   4782 
   4783 	/*
   4784 	 * For PCH, this write will make sure that any noise will be detected
   4785 	 * as a CRC error and be dropped rather than show up as a bad packet
   4786 	 * to the DMA engine
   4787 	 */
   4788 	if (sc->sc_type == WM_T_PCH)
   4789 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4790 
   4791 	if (sc->sc_type >= WM_T_82544)
   4792 		CSR_WRITE(sc, WMREG_WUC, 0);
   4793 
   4794 	wm_reset_mdicnfg_82580(sc);
   4795 
   4796 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4797 		wm_pll_workaround_i210(sc);
   4798 
   4799 	if (sc->sc_type == WM_T_80003) {
   4800 		/* default to TRUE to enable the MDIC W/A */
   4801 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4802 
   4803 		rv = wm_kmrn_readreg(sc,
   4804 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4805 		if (rv == 0) {
   4806 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4807 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4808 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4809 			else
   4810 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4811 		}
   4812 	}
   4813 }
   4814 
   4815 /*
   4816  * wm_add_rxbuf:
   4817  *
    4818  *	Add a receive buffer to the indicated descriptor.
   4819  */
   4820 static int
   4821 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4822 {
   4823 	struct wm_softc *sc = rxq->rxq_sc;
   4824 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4825 	struct mbuf *m;
   4826 	int error;
   4827 
   4828 	KASSERT(mutex_owned(rxq->rxq_lock));
   4829 
   4830 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4831 	if (m == NULL)
   4832 		return ENOBUFS;
   4833 
   4834 	MCLGET(m, M_DONTWAIT);
   4835 	if ((m->m_flags & M_EXT) == 0) {
   4836 		m_freem(m);
   4837 		return ENOBUFS;
   4838 	}
   4839 
   4840 	if (rxs->rxs_mbuf != NULL)
   4841 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4842 
   4843 	rxs->rxs_mbuf = m;
   4844 
   4845 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4846 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4847 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4848 	if (error) {
   4849 		/* XXX XXX XXX */
   4850 		aprint_error_dev(sc->sc_dev,
   4851 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4852 		panic("wm_add_rxbuf");
   4853 	}
   4854 
   4855 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4856 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4857 
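         	/*
         	 * For NEWQUEUE devices, defer writing the descriptor until the
         	 * receiver is enabled; see the note above wm_set_filter() in
         	 * wm_init_locked().
         	 */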
   4858 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4859 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4860 			wm_init_rxdesc(rxq, idx);
   4861 	} else
   4862 		wm_init_rxdesc(rxq, idx);
   4863 
   4864 	return 0;
   4865 }
   4866 
   4867 /*
   4868  * wm_rxdrain:
   4869  *
   4870  *	Drain the receive queue.
   4871  */
   4872 static void
   4873 wm_rxdrain(struct wm_rxqueue *rxq)
   4874 {
   4875 	struct wm_softc *sc = rxq->rxq_sc;
   4876 	struct wm_rxsoft *rxs;
   4877 	int i;
   4878 
   4879 	KASSERT(mutex_owned(rxq->rxq_lock));
   4880 
   4881 	for (i = 0; i < WM_NRXDESC; i++) {
   4882 		rxs = &rxq->rxq_soft[i];
   4883 		if (rxs->rxs_mbuf != NULL) {
   4884 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4885 			m_freem(rxs->rxs_mbuf);
   4886 			rxs->rxs_mbuf = NULL;
   4887 		}
   4888 	}
   4889 }
   4890 
   4891 /*
   4892  * Setup registers for RSS.
   4893  *
   4894  * XXX not yet VMDq support
   4895  */
   4896 static void
   4897 wm_init_rss(struct wm_softc *sc)
   4898 {
   4899 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4900 	int i;
   4901 
   4902 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4903 
   4904 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4905 		int qid, reta_ent;
   4906 
   4907 		qid  = i % sc->sc_nqueues;
   4908 		switch (sc->sc_type) {
   4909 		case WM_T_82574:
   4910 			reta_ent = __SHIFTIN(qid,
   4911 			    RETA_ENT_QINDEX_MASK_82574);
   4912 			break;
   4913 		case WM_T_82575:
   4914 			reta_ent = __SHIFTIN(qid,
   4915 			    RETA_ENT_QINDEX1_MASK_82575);
   4916 			break;
   4917 		default:
   4918 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4919 			break;
   4920 		}
   4921 
   4922 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4923 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4924 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4925 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4926 	}
   4927 
   4928 	rss_getkey((uint8_t *)rss_key);
   4929 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4930 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4931 
   4932 	if (sc->sc_type == WM_T_82574)
   4933 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4934 	else
   4935 		mrqc = MRQC_ENABLE_RSS_MQ;
   4936 
   4937 	/*
   4938 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4939 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4940 	 */
   4941 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4942 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4943 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4944 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4945 
   4946 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4947 }
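
         /*
          * Illustrative example of the mapping above: with sc_nqueues = 4,
          * RETA entry i is programmed to queue (i % 4), so the indirection
          * table walks the queues round-robin (0, 1, 2, 3, 0, ...).  The
          * hardware hashes each packet's address/port fields with the RSS
          * key and uses the hash to pick a RETA entry, which selects the
          * receive queue.
          */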
   4948 
   4949 /*
    4950  * Adjust the TX and RX queue numbers which the system actually uses.
    4951  *
    4952  * The numbers are affected by the following parameters:
    4953  *     - The number of hardware queues
   4954  *     - The number of MSI-X vectors (= "nvectors" argument)
   4955  *     - ncpu
   4956  */
   4957 static void
   4958 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4959 {
   4960 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4961 
   4962 	if (nvectors < 2) {
   4963 		sc->sc_nqueues = 1;
   4964 		return;
   4965 	}
   4966 
   4967 	switch (sc->sc_type) {
   4968 	case WM_T_82572:
   4969 		hw_ntxqueues = 2;
   4970 		hw_nrxqueues = 2;
   4971 		break;
   4972 	case WM_T_82574:
   4973 		hw_ntxqueues = 2;
   4974 		hw_nrxqueues = 2;
   4975 		break;
   4976 	case WM_T_82575:
   4977 		hw_ntxqueues = 4;
   4978 		hw_nrxqueues = 4;
   4979 		break;
   4980 	case WM_T_82576:
   4981 		hw_ntxqueues = 16;
   4982 		hw_nrxqueues = 16;
   4983 		break;
   4984 	case WM_T_82580:
   4985 	case WM_T_I350:
   4986 	case WM_T_I354:
   4987 		hw_ntxqueues = 8;
   4988 		hw_nrxqueues = 8;
   4989 		break;
   4990 	case WM_T_I210:
   4991 		hw_ntxqueues = 4;
   4992 		hw_nrxqueues = 4;
   4993 		break;
   4994 	case WM_T_I211:
   4995 		hw_ntxqueues = 2;
   4996 		hw_nrxqueues = 2;
   4997 		break;
   4998 		/*
    4999 		 * As the following Ethernet controllers do not support
    5000 		 * MSI-X, this driver does not use multiqueue on them:
   5001 		 *     - WM_T_80003
   5002 		 *     - WM_T_ICH8
   5003 		 *     - WM_T_ICH9
   5004 		 *     - WM_T_ICH10
   5005 		 *     - WM_T_PCH
   5006 		 *     - WM_T_PCH2
   5007 		 *     - WM_T_PCH_LPT
   5008 		 */
   5009 	default:
   5010 		hw_ntxqueues = 1;
   5011 		hw_nrxqueues = 1;
   5012 		break;
   5013 	}
   5014 
   5015 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   5016 
   5017 	/*
    5018 	 * Since more queues than MSI-X vectors cannot improve scaling,
    5019 	 * we limit the number of queues actually used.
   5020 	 */
   5021 	if (nvectors < hw_nqueues + 1)
   5022 		sc->sc_nqueues = nvectors - 1;
   5023 	else
   5024 		sc->sc_nqueues = hw_nqueues;
   5025 
   5026 	/*
    5027 	 * Since more queues than CPUs cannot improve scaling, we limit
    5028 	 * the number of queues actually used.
   5029 	 */
   5030 	if (ncpu < sc->sc_nqueues)
   5031 		sc->sc_nqueues = ncpu;
   5032 }
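
         /*
          * Worked example (illustrative): on an 82576 (16 hardware queues
          * per direction, per the switch above) with nvectors = 5 and
          * ncpu = 8, hw_nqueues = 16; the vector limit gives sc_nqueues =
          * 5 - 1 = 4 (one vector is reserved for the link interrupt), and
          * the CPU limit leaves it at 4 since ncpu >= 4.
          */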
   5033 
   5034 static inline bool
   5035 wm_is_using_msix(struct wm_softc *sc)
   5036 {
   5037 
   5038 	return (sc->sc_nintrs > 1);
   5039 }
   5040 
   5041 static inline bool
   5042 wm_is_using_multiqueue(struct wm_softc *sc)
   5043 {
   5044 
   5045 	return (sc->sc_nqueues > 1);
   5046 }
   5047 
   5048 static int
   5049 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5050 {
   5051 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5052 	wmq->wmq_id = qidx;
   5053 	wmq->wmq_intr_idx = intr_idx;
   5054 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5055 #ifdef WM_MPSAFE
   5056 	    | SOFTINT_MPSAFE
   5057 #endif
   5058 	    , wm_handle_queue, wmq);
   5059 	if (wmq->wmq_si != NULL)
   5060 		return 0;
   5061 
   5062 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5063 	    wmq->wmq_id);
   5064 
   5065 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5066 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5067 	return ENOMEM;
   5068 }
   5069 
   5070 /*
   5071  * Both single interrupt MSI and INTx can use this function.
   5072  */
   5073 static int
   5074 wm_setup_legacy(struct wm_softc *sc)
   5075 {
   5076 	pci_chipset_tag_t pc = sc->sc_pc;
   5077 	const char *intrstr = NULL;
   5078 	char intrbuf[PCI_INTRSTR_LEN];
   5079 	int error;
   5080 
   5081 	error = wm_alloc_txrx_queues(sc);
   5082 	if (error) {
   5083 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5084 		    error);
   5085 		return ENOMEM;
   5086 	}
   5087 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5088 	    sizeof(intrbuf));
   5089 #ifdef WM_MPSAFE
   5090 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5091 #endif
   5092 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5093 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5094 	if (sc->sc_ihs[0] == NULL) {
   5095 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5096 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5097 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5098 		return ENOMEM;
   5099 	}
   5100 
   5101 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5102 	sc->sc_nintrs = 1;
   5103 
   5104 	return wm_softint_establish(sc, 0, 0);
   5105 }
   5106 
   5107 static int
   5108 wm_setup_msix(struct wm_softc *sc)
   5109 {
   5110 	void *vih;
   5111 	kcpuset_t *affinity;
   5112 	int qidx, error, intr_idx, txrx_established;
   5113 	pci_chipset_tag_t pc = sc->sc_pc;
   5114 	const char *intrstr = NULL;
   5115 	char intrbuf[PCI_INTRSTR_LEN];
   5116 	char intr_xname[INTRDEVNAMEBUF];
   5117 
   5118 	if (sc->sc_nqueues < ncpu) {
   5119 		/*
   5120 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5121 		 * interrupts starts from CPU#1.
   5122 		 */
   5123 		sc->sc_affinity_offset = 1;
   5124 	} else {
   5125 		/*
    5126 		 * In this case, this device uses all CPUs, so we align the
    5127 		 * affinity cpu_index with the MSI-X vector number for readability.
   5128 		 */
   5129 		sc->sc_affinity_offset = 0;
   5130 	}
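         	/*
         	 * Example (illustrative): with sc_nqueues = 4 and ncpu = 8, the
         	 * offset of 1 pins the TXRX0..TXRX3 vectors to CPU1..CPU4 in
         	 * the loop below, leaving CPU0 free for other devices'
         	 * interrupts.
         	 */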
   5131 
   5132 	error = wm_alloc_txrx_queues(sc);
   5133 	if (error) {
   5134 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5135 		    error);
   5136 		return ENOMEM;
   5137 	}
   5138 
   5139 	kcpuset_create(&affinity, false);
   5140 	intr_idx = 0;
   5141 
   5142 	/*
   5143 	 * TX and RX
   5144 	 */
   5145 	txrx_established = 0;
   5146 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5147 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5148 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5149 
   5150 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5151 		    sizeof(intrbuf));
   5152 #ifdef WM_MPSAFE
   5153 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5154 		    PCI_INTR_MPSAFE, true);
   5155 #endif
   5156 		memset(intr_xname, 0, sizeof(intr_xname));
   5157 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5158 		    device_xname(sc->sc_dev), qidx);
   5159 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5160 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5161 		if (vih == NULL) {
   5162 			aprint_error_dev(sc->sc_dev,
   5163 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5164 			    intrstr ? " at " : "",
   5165 			    intrstr ? intrstr : "");
   5166 
   5167 			goto fail;
   5168 		}
   5169 		kcpuset_zero(affinity);
   5170 		/* Round-robin affinity */
   5171 		kcpuset_set(affinity, affinity_to);
   5172 		error = interrupt_distribute(vih, affinity, NULL);
   5173 		if (error == 0) {
   5174 			aprint_normal_dev(sc->sc_dev,
   5175 			    "for TX and RX interrupting at %s affinity to %u\n",
   5176 			    intrstr, affinity_to);
   5177 		} else {
   5178 			aprint_normal_dev(sc->sc_dev,
   5179 			    "for TX and RX interrupting at %s\n", intrstr);
   5180 		}
   5181 		sc->sc_ihs[intr_idx] = vih;
   5182 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5183 			goto fail;
   5184 		txrx_established++;
   5185 		intr_idx++;
   5186 	}
   5187 
   5188 	/*
   5189 	 * LINK
   5190 	 */
   5191 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5192 	    sizeof(intrbuf));
   5193 #ifdef WM_MPSAFE
   5194 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5195 #endif
   5196 	memset(intr_xname, 0, sizeof(intr_xname));
   5197 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5198 	    device_xname(sc->sc_dev));
   5199 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5200 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5201 	if (vih == NULL) {
   5202 		aprint_error_dev(sc->sc_dev,
   5203 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5204 		    intrstr ? " at " : "",
   5205 		    intrstr ? intrstr : "");
   5206 
   5207 		goto fail;
   5208 	}
    5209 	/* Keep the default affinity for the LINK interrupt */
   5210 	aprint_normal_dev(sc->sc_dev,
   5211 	    "for LINK interrupting at %s\n", intrstr);
   5212 	sc->sc_ihs[intr_idx] = vih;
   5213 	sc->sc_link_intr_idx = intr_idx;
   5214 
   5215 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5216 	kcpuset_destroy(affinity);
   5217 	return 0;
   5218 
   5219  fail:
   5220 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5221 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5222 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5223 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5224 	}
   5225 
   5226 	kcpuset_destroy(affinity);
   5227 	return ENOMEM;
   5228 }
   5229 
   5230 static void
   5231 wm_unset_stopping_flags(struct wm_softc *sc)
   5232 {
   5233 	int i;
   5234 
   5235 	KASSERT(WM_CORE_LOCKED(sc));
   5236 
   5237 	/*
   5238 	 * must unset stopping flags in ascending order.
   5239 	 */
   5240 	for (i = 0; i < sc->sc_nqueues; i++) {
   5241 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5242 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5243 
   5244 		mutex_enter(txq->txq_lock);
   5245 		txq->txq_stopping = false;
   5246 		mutex_exit(txq->txq_lock);
   5247 
   5248 		mutex_enter(rxq->rxq_lock);
   5249 		rxq->rxq_stopping = false;
   5250 		mutex_exit(rxq->rxq_lock);
   5251 	}
   5252 
   5253 	sc->sc_core_stopping = false;
   5254 }
   5255 
   5256 static void
   5257 wm_set_stopping_flags(struct wm_softc *sc)
   5258 {
   5259 	int i;
   5260 
   5261 	KASSERT(WM_CORE_LOCKED(sc));
   5262 
   5263 	sc->sc_core_stopping = true;
   5264 
   5265 	/*
   5266 	 * must set stopping flags in ascending order.
   5267 	 */
   5268 	for (i = 0; i < sc->sc_nqueues; i++) {
   5269 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5270 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5271 
   5272 		mutex_enter(rxq->rxq_lock);
   5273 		rxq->rxq_stopping = true;
   5274 		mutex_exit(rxq->rxq_lock);
   5275 
   5276 		mutex_enter(txq->txq_lock);
   5277 		txq->txq_stopping = true;
   5278 		mutex_exit(txq->txq_lock);
   5279 	}
   5280 }
   5281 
   5282 /*
    5283  * Write the interrupt interval value to the ITR or EITR register.
   5284  */
   5285 static void
   5286 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5287 {
   5288 
   5289 	if (!wmq->wmq_set_itr)
   5290 		return;
   5291 
   5292 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5293 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5294 
   5295 		/*
    5296 		 * The 82575 doesn't have the CNT_INGR field,
    5297 		 * so overwrite the counter field in software.
   5298 		 */
   5299 		if (sc->sc_type == WM_T_82575)
   5300 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5301 		else
   5302 			eitr |= EITR_CNT_INGR;
   5303 
   5304 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5305 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5306 		/*
    5307 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5308 		 * the multiqueue function with MSI-X.
   5309 		 */
   5310 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5311 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5312 	} else {
   5313 		KASSERT(wmq->wmq_id == 0);
   5314 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5315 	}
   5316 
   5317 	wmq->wmq_set_itr = false;
   5318 }
   5319 
   5320 /*
   5321  * TODO
    5322  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5323  * however, it does not fit wm(4), so AIM stays disabled until we find
    5324  * an appropriate ITR calculation.
   5325  */
   5326 /*
    5327  * Calculate the interrupt interval value to be written by
    5328  * wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5329  */
   5330 static void
   5331 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5332 {
   5333 #ifdef NOTYET
   5334 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5335 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5336 	uint32_t avg_size = 0;
   5337 	uint32_t new_itr;
   5338 
   5339 	if (rxq->rxq_packets)
   5340 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5341 	if (txq->txq_packets)
   5342 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5343 
   5344 	if (avg_size == 0) {
   5345 		new_itr = 450; /* restore default value */
   5346 		goto out;
   5347 	}
   5348 
   5349 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5350 	avg_size += 24;
   5351 
   5352 	/* Don't starve jumbo frames */
   5353 	avg_size = min(avg_size, 3000);
   5354 
   5355 	/* Give a little boost to mid-size frames */
   5356 	if ((avg_size > 300) && (avg_size < 1200))
   5357 		new_itr = avg_size / 3;
   5358 	else
   5359 		new_itr = avg_size / 2;
   5360 
   5361 out:
   5362 	/*
    5363 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5364 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5365 	 */
   5366 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5367 		new_itr *= 4;
   5368 
   5369 	if (new_itr != wmq->wmq_itr) {
   5370 		wmq->wmq_itr = new_itr;
   5371 		wmq->wmq_set_itr = true;
   5372 	} else
   5373 		wmq->wmq_set_itr = false;
   5374 
   5375 	rxq->rxq_packets = 0;
   5376 	rxq->rxq_bytes = 0;
   5377 	txq->txq_packets = 0;
   5378 	txq->txq_bytes = 0;
   5379 #endif
   5380 }
   5381 
   5382 /*
   5383  * wm_init:		[ifnet interface function]
   5384  *
   5385  *	Initialize the interface.
   5386  */
   5387 static int
   5388 wm_init(struct ifnet *ifp)
   5389 {
   5390 	struct wm_softc *sc = ifp->if_softc;
   5391 	int ret;
   5392 
   5393 	WM_CORE_LOCK(sc);
   5394 	ret = wm_init_locked(ifp);
   5395 	WM_CORE_UNLOCK(sc);
   5396 
   5397 	return ret;
   5398 }
   5399 
   5400 static int
   5401 wm_init_locked(struct ifnet *ifp)
   5402 {
   5403 	struct wm_softc *sc = ifp->if_softc;
   5404 	int i, j, trynum, error = 0;
   5405 	uint32_t reg;
   5406 
   5407 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5408 		device_xname(sc->sc_dev), __func__));
   5409 	KASSERT(WM_CORE_LOCKED(sc));
   5410 
   5411 	/*
    5412 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5413 	 * There is a small but measurable benefit to avoiding the adjustment
   5414 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5415 	 * on such platforms.  One possibility is that the DMA itself is
   5416 	 * slightly more efficient if the front of the entire packet (instead
   5417 	 * of the front of the headers) is aligned.
   5418 	 *
   5419 	 * Note we must always set align_tweak to 0 if we are using
   5420 	 * jumbo frames.
   5421 	 */
   5422 #ifdef __NO_STRICT_ALIGNMENT
   5423 	sc->sc_align_tweak = 0;
   5424 #else
   5425 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5426 		sc->sc_align_tweak = 0;
   5427 	else
   5428 		sc->sc_align_tweak = 2;
   5429 #endif /* __NO_STRICT_ALIGNMENT */
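         	/*
         	 * Example (assuming the usual MCLBYTES of 2048): a standard MTU
         	 * of 1500 gives 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) =
         	 * 1518, which fits in MCLBYTES - 2, so align_tweak is 2; the
         	 * 14-byte Ethernet header then leaves the IP header 4-byte
         	 * aligned on strict-alignment platforms.
         	 */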
   5430 
   5431 	/* Cancel any pending I/O. */
   5432 	wm_stop_locked(ifp, 0);
   5433 
   5434 	/* update statistics before reset */
   5435 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5436 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5437 
   5438 	/* PCH_SPT hardware workaround */
   5439 	if (sc->sc_type == WM_T_PCH_SPT)
   5440 		wm_flush_desc_rings(sc);
   5441 
   5442 	/* Reset the chip to a known state. */
   5443 	wm_reset(sc);
   5444 
   5445 	/*
    5446 	 * AMT-based hardware can now take control from the firmware.
   5447 	 * Do this after reset.
   5448 	 */
   5449 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5450 		wm_get_hw_control(sc);
   5451 
   5452 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5453 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5454 		wm_legacy_irq_quirk_spt(sc);
   5455 
   5456 	/* Init hardware bits */
   5457 	wm_initialize_hardware_bits(sc);
   5458 
   5459 	/* Reset the PHY. */
   5460 	if (sc->sc_flags & WM_F_HAS_MII)
   5461 		wm_gmii_reset(sc);
   5462 
   5463 	/* Calculate (E)ITR value */
   5464 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5465 		/*
    5466 		 * For NEWQUEUE's EITR (except for the 82575).
    5467 		 * The 82575's EITR should be set to the same throttling
    5468 		 * value as other old controllers' ITR because the
    5469 		 * interrupts/sec calculation is the same, that is,
    5470 		 * 1,000,000,000 / (N * 256).
    5471 		 *
    5472 		 * The 82574's EITR should be set to the same value as the ITR.
    5473 		 *
    5474 		 * For N interrupts/sec, set this value to 1,000,000 / N.
   5475 		 */
   5476 		sc->sc_itr_init = 450;
   5477 	} else if (sc->sc_type >= WM_T_82543) {
   5478 		/*
   5479 		 * Set up the interrupt throttling register (units of 256ns)
   5480 		 * Note that a footnote in Intel's documentation says this
   5481 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5482 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5483 		 * that that is also true for the 1024ns units of the other
   5484 		 * interrupt-related timer registers -- so, really, we ought
   5485 		 * to divide this value by 4 when the link speed is low.
   5486 		 *
   5487 		 * XXX implement this division at link speed change!
   5488 		 */
   5489 
   5490 		/*
   5491 		 * For N interrupts/sec, set this value to:
   5492 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5493 		 * absolute and packet timer values to this value
   5494 		 * divided by 4 to get "simple timer" behavior.
   5495 		 */
   5496 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5497 	}
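         	/*
         	 * Sanity check of the arithmetic (illustrative): sc_itr_init =
         	 * 1500 in the legacy ITR formula gives 1,000,000,000 /
         	 * (1500 * 256) ~= 2604 interrupts/sec, matching the comment
         	 * above; the NEWQUEUE EITR value of 450 corresponds to
         	 * 1,000,000 / 450 ~= 2222 interrupts/sec.
         	 */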
   5498 
   5499 	error = wm_init_txrx_queues(sc);
   5500 	if (error)
   5501 		goto out;
   5502 
   5503 	/*
   5504 	 * Clear out the VLAN table -- we don't use it (yet).
   5505 	 */
   5506 	CSR_WRITE(sc, WMREG_VET, 0);
   5507 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5508 		trynum = 10; /* Due to hw errata */
   5509 	else
   5510 		trynum = 1;
   5511 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5512 		for (j = 0; j < trynum; j++)
   5513 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5514 
   5515 	/*
   5516 	 * Set up flow-control parameters.
   5517 	 *
   5518 	 * XXX Values could probably stand some tuning.
   5519 	 */
   5520 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5521 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5522 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5523 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5524 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5525 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5526 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5527 	}
   5528 
   5529 	sc->sc_fcrtl = FCRTL_DFLT;
   5530 	if (sc->sc_type < WM_T_82543) {
   5531 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5532 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5533 	} else {
   5534 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5535 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5536 	}
   5537 
   5538 	if (sc->sc_type == WM_T_80003)
   5539 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5540 	else
   5541 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5542 
   5543 	/* Writes the control register. */
   5544 	wm_set_vlan(sc);
   5545 
   5546 	if (sc->sc_flags & WM_F_HAS_MII) {
   5547 		uint16_t kmreg;
   5548 
   5549 		switch (sc->sc_type) {
   5550 		case WM_T_80003:
   5551 		case WM_T_ICH8:
   5552 		case WM_T_ICH9:
   5553 		case WM_T_ICH10:
   5554 		case WM_T_PCH:
   5555 		case WM_T_PCH2:
   5556 		case WM_T_PCH_LPT:
   5557 		case WM_T_PCH_SPT:
   5558 		case WM_T_PCH_CNP:
   5559 			/*
   5560 			 * Set the mac to wait the maximum time between each
   5561 			 * iteration and increase the max iterations when
   5562 			 * polling the phy; this fixes erroneous timeouts at
   5563 			 * 10Mbps.
   5564 			 */
   5565 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5566 			    0xFFFF);
   5567 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5568 			    &kmreg);
   5569 			kmreg |= 0x3F;
   5570 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5571 			    kmreg);
   5572 			break;
   5573 		default:
   5574 			break;
   5575 		}
   5576 
   5577 		if (sc->sc_type == WM_T_80003) {
   5578 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5579 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5580 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5581 
   5582 			/* Bypass RX and TX FIFO's */
   5583 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5584 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5585 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5586 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5587 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5588 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5589 		}
   5590 	}
   5591 #if 0
   5592 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5593 #endif
   5594 
   5595 	/* Set up checksum offload parameters. */
   5596 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5597 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5598 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5599 		reg |= RXCSUM_IPOFL;
   5600 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5601 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5602 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5603 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5604 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5605 
   5606 	/* Set registers about MSI-X */
   5607 	if (wm_is_using_msix(sc)) {
   5608 		uint32_t ivar;
   5609 		struct wm_queue *wmq;
   5610 		int qid, qintr_idx;
   5611 
   5612 		if (sc->sc_type == WM_T_82575) {
   5613 			/* Interrupt control */
   5614 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5615 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5616 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5617 
   5618 			/* TX and RX */
   5619 			for (i = 0; i < sc->sc_nqueues; i++) {
   5620 				wmq = &sc->sc_queue[i];
   5621 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5622 				    EITR_TX_QUEUE(wmq->wmq_id)
   5623 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5624 			}
   5625 			/* Link status */
   5626 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5627 			    EITR_OTHER);
   5628 		} else if (sc->sc_type == WM_T_82574) {
   5629 			/* Interrupt control */
   5630 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5631 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5632 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5633 
   5634 			/*
    5635 			 * Work around an issue with spurious interrupts in
    5636 			 * MSI-X mode.
    5637 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5638 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5639 			 */
   5640 			reg = CSR_READ(sc, WMREG_RFCTL);
   5641 			reg |= WMREG_RFCTL_ACKDIS;
   5642 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5643 
   5644 			ivar = 0;
   5645 			/* TX and RX */
   5646 			for (i = 0; i < sc->sc_nqueues; i++) {
   5647 				wmq = &sc->sc_queue[i];
   5648 				qid = wmq->wmq_id;
   5649 				qintr_idx = wmq->wmq_intr_idx;
   5650 
   5651 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5652 				    IVAR_TX_MASK_Q_82574(qid));
   5653 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5654 				    IVAR_RX_MASK_Q_82574(qid));
   5655 			}
   5656 			/* Link status */
   5657 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5658 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5659 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5660 		} else {
   5661 			/* Interrupt control */
   5662 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5663 			    | GPIE_EIAME | GPIE_PBA);
   5664 
   5665 			switch (sc->sc_type) {
   5666 			case WM_T_82580:
   5667 			case WM_T_I350:
   5668 			case WM_T_I354:
   5669 			case WM_T_I210:
   5670 			case WM_T_I211:
   5671 				/* TX and RX */
   5672 				for (i = 0; i < sc->sc_nqueues; i++) {
   5673 					wmq = &sc->sc_queue[i];
   5674 					qid = wmq->wmq_id;
   5675 					qintr_idx = wmq->wmq_intr_idx;
   5676 
   5677 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5678 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5679 					ivar |= __SHIFTIN((qintr_idx
   5680 						| IVAR_VALID),
   5681 					    IVAR_TX_MASK_Q(qid));
   5682 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5683 					ivar |= __SHIFTIN((qintr_idx
   5684 						| IVAR_VALID),
   5685 					    IVAR_RX_MASK_Q(qid));
   5686 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5687 				}
   5688 				break;
   5689 			case WM_T_82576:
   5690 				/* TX and RX */
   5691 				for (i = 0; i < sc->sc_nqueues; i++) {
   5692 					wmq = &sc->sc_queue[i];
   5693 					qid = wmq->wmq_id;
   5694 					qintr_idx = wmq->wmq_intr_idx;
   5695 
   5696 					ivar = CSR_READ(sc,
   5697 					    WMREG_IVAR_Q_82576(qid));
   5698 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5699 					ivar |= __SHIFTIN((qintr_idx
   5700 						| IVAR_VALID),
   5701 					    IVAR_TX_MASK_Q_82576(qid));
   5702 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5703 					ivar |= __SHIFTIN((qintr_idx
   5704 						| IVAR_VALID),
   5705 					    IVAR_RX_MASK_Q_82576(qid));
   5706 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5707 					    ivar);
   5708 				}
   5709 				break;
   5710 			default:
   5711 				break;
   5712 			}
   5713 
   5714 			/* Link status */
   5715 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5716 			    IVAR_MISC_OTHER);
   5717 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5718 		}
   5719 
   5720 		if (wm_is_using_multiqueue(sc)) {
   5721 			wm_init_rss(sc);
   5722 
   5723 			/*
    5724 			 * NOTE: Receive full-packet checksum offload is
    5725 			 * mutually exclusive with multiqueue; however, this
    5726 			 * is not the same as TCP/IP checksum offload, which
    5727 			 * still works.
    5728 			 */
   5729 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5730 			reg |= RXCSUM_PCSD;
   5731 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5732 		}
   5733 	}
   5734 
   5735 	/* Set up the interrupt registers. */
   5736 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5737 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5738 	    ICR_RXO | ICR_RXT0;
   5739 	if (wm_is_using_msix(sc)) {
   5740 		uint32_t mask;
   5741 		struct wm_queue *wmq;
   5742 
   5743 		switch (sc->sc_type) {
   5744 		case WM_T_82574:
   5745 			mask = 0;
   5746 			for (i = 0; i < sc->sc_nqueues; i++) {
   5747 				wmq = &sc->sc_queue[i];
   5748 				mask |= ICR_TXQ(wmq->wmq_id);
   5749 				mask |= ICR_RXQ(wmq->wmq_id);
   5750 			}
   5751 			mask |= ICR_OTHER;
   5752 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5753 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5754 			break;
   5755 		default:
   5756 			if (sc->sc_type == WM_T_82575) {
   5757 				mask = 0;
   5758 				for (i = 0; i < sc->sc_nqueues; i++) {
   5759 					wmq = &sc->sc_queue[i];
   5760 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5761 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5762 				}
   5763 				mask |= EITR_OTHER;
   5764 			} else {
   5765 				mask = 0;
   5766 				for (i = 0; i < sc->sc_nqueues; i++) {
   5767 					wmq = &sc->sc_queue[i];
   5768 					mask |= 1 << wmq->wmq_intr_idx;
   5769 				}
   5770 				mask |= 1 << sc->sc_link_intr_idx;
   5771 			}
   5772 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5773 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5774 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5775 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5776 			break;
   5777 		}
   5778 	} else
   5779 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
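         	/*
         	 * Note (illustrative summary): in the MSI-X paths above, the
         	 * same vector mask is written to EIAC, EIAM and EIMS so queue
         	 * vectors are auto-cleared and re-armed by hardware; see the
         	 * 82575/82576 datasheets for the exact auto-clear/auto-mask
         	 * semantics.
         	 */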
   5780 
   5781 	/* Set up the inter-packet gap. */
   5782 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5783 
   5784 	if (sc->sc_type >= WM_T_82543) {
   5785 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5786 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5787 			wm_itrs_writereg(sc, wmq);
   5788 		}
   5789 		/*
    5790 		 * Link interrupts occur much less frequently than TX and
    5791 		 * RX interrupts, so we don't tune the
    5792 		 * EITR(WM_MSIX_LINKINTR_IDX) value like FreeBSD's if_igb
    5793 		 * does.
   5794 		 */
   5795 	}
   5796 
   5797 	/* Set the VLAN ethernetype. */
   5798 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5799 
   5800 	/*
   5801 	 * Set up the transmit control register; we start out with
    5802 	 * a collision distance suitable for FDX, but update it when
   5803 	 * we resolve the media type.
   5804 	 */
   5805 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5806 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5807 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5808 	if (sc->sc_type >= WM_T_82571)
   5809 		sc->sc_tctl |= TCTL_MULR;
   5810 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5811 
   5812 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5813 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5814 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5815 	}
   5816 
   5817 	if (sc->sc_type == WM_T_80003) {
   5818 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5819 		reg &= ~TCTL_EXT_GCEX_MASK;
   5820 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5821 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5822 	}
   5823 
   5824 	/* Set the media. */
   5825 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5826 		goto out;
   5827 
   5828 	/* Configure for OS presence */
   5829 	wm_init_manageability(sc);
   5830 
   5831 	/*
   5832 	 * Set up the receive control register; we actually program
   5833 	 * the register when we set the receive filter.  Use multicast
   5834 	 * address offset type 0.
   5835 	 *
   5836 	 * Only the i82544 has the ability to strip the incoming
   5837 	 * CRC, so we don't enable that feature.
   5838 	 */
   5839 	sc->sc_mchash_type = 0;
   5840 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5841 	    | RCTL_MO(sc->sc_mchash_type);
   5842 
   5843 	/*
    5844 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5845 	 */
   5846 	if (sc->sc_type == WM_T_82574)
   5847 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5848 
   5849 	/*
   5850 	 * The I350 has a bug where it always strips the CRC whether
    5851 	 * asked to or not. So ask for the stripped CRC here and cope in rxeof.
   5852 	 */
   5853 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5854 	    || (sc->sc_type == WM_T_I210))
   5855 		sc->sc_rctl |= RCTL_SECRC;
   5856 
   5857 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5858 	    && (ifp->if_mtu > ETHERMTU)) {
   5859 		sc->sc_rctl |= RCTL_LPE;
   5860 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5861 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5862 	}
   5863 
   5864 	if (MCLBYTES == 2048) {
   5865 		sc->sc_rctl |= RCTL_2k;
   5866 	} else {
   5867 		if (sc->sc_type >= WM_T_82543) {
   5868 			switch (MCLBYTES) {
   5869 			case 4096:
   5870 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5871 				break;
   5872 			case 8192:
   5873 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5874 				break;
   5875 			case 16384:
   5876 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5877 				break;
   5878 			default:
   5879 				panic("wm_init: MCLBYTES %d unsupported",
   5880 				    MCLBYTES);
   5881 				break;
   5882 			}
    5883 		} else
         			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5884 	}
   5885 
   5886 	/* Enable ECC */
   5887 	switch (sc->sc_type) {
   5888 	case WM_T_82571:
   5889 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5890 		reg |= PBA_ECC_CORR_EN;
   5891 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5892 		break;
   5893 	case WM_T_PCH_LPT:
   5894 	case WM_T_PCH_SPT:
   5895 	case WM_T_PCH_CNP:
   5896 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5897 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5898 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5899 
   5900 		sc->sc_ctrl |= CTRL_MEHE;
   5901 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5902 		break;
   5903 	default:
   5904 		break;
   5905 	}
   5906 
   5907 	/*
   5908 	 * Set the receive filter.
   5909 	 *
   5910 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5911 	 * the setting of RCTL.EN in wm_set_filter()
   5912 	 */
   5913 	wm_set_filter(sc);
   5914 
   5915 	/* On 575 and later set RDT only if RX enabled */
   5916 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5917 		int qidx;
   5918 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5919 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5920 			for (i = 0; i < WM_NRXDESC; i++) {
   5921 				mutex_enter(rxq->rxq_lock);
   5922 				wm_init_rxdesc(rxq, i);
   5923 				mutex_exit(rxq->rxq_lock);
   5925 			}
   5926 		}
   5927 	}
   5928 
   5929 	wm_unset_stopping_flags(sc);
   5930 
   5931 	/* Start the one second link check clock. */
   5932 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5933 
   5934 	/* ...all done! */
   5935 	ifp->if_flags |= IFF_RUNNING;
   5936 	ifp->if_flags &= ~IFF_OACTIVE;
   5937 
   5938  out:
   5939 	sc->sc_if_flags = ifp->if_flags;
   5940 	if (error)
   5941 		log(LOG_ERR, "%s: interface not running\n",
   5942 		    device_xname(sc->sc_dev));
   5943 	return error;
   5944 }
   5945 
   5946 /*
   5947  * wm_stop:		[ifnet interface function]
   5948  *
   5949  *	Stop transmission on the interface.
   5950  */
   5951 static void
   5952 wm_stop(struct ifnet *ifp, int disable)
   5953 {
   5954 	struct wm_softc *sc = ifp->if_softc;
   5955 
   5956 	WM_CORE_LOCK(sc);
   5957 	wm_stop_locked(ifp, disable);
   5958 	WM_CORE_UNLOCK(sc);
   5959 }
   5960 
   5961 static void
   5962 wm_stop_locked(struct ifnet *ifp, int disable)
   5963 {
   5964 	struct wm_softc *sc = ifp->if_softc;
   5965 	struct wm_txsoft *txs;
   5966 	int i, qidx;
   5967 
   5968 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5969 		device_xname(sc->sc_dev), __func__));
   5970 	KASSERT(WM_CORE_LOCKED(sc));
   5971 
   5972 	wm_set_stopping_flags(sc);
   5973 
   5974 	/* Stop the one second clock. */
   5975 	callout_stop(&sc->sc_tick_ch);
   5976 
   5977 	/* Stop the 82547 Tx FIFO stall check timer. */
   5978 	if (sc->sc_type == WM_T_82547)
   5979 		callout_stop(&sc->sc_txfifo_ch);
   5980 
   5981 	if (sc->sc_flags & WM_F_HAS_MII) {
   5982 		/* Down the MII. */
   5983 		mii_down(&sc->sc_mii);
   5984 	} else {
   5985 #if 0
   5986 		/* Should we clear PHY's status properly? */
   5987 		wm_reset(sc);
   5988 #endif
   5989 	}
   5990 
   5991 	/* Stop the transmit and receive processes. */
   5992 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5993 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5994 	sc->sc_rctl &= ~RCTL_EN;
   5995 
   5996 	/*
   5997 	 * Clear the interrupt mask to ensure the device cannot assert its
   5998 	 * interrupt line.
   5999 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6000 	 * service any currently pending or shared interrupt.
   6001 	 */
   6002 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6003 	sc->sc_icr = 0;
   6004 	if (wm_is_using_msix(sc)) {
   6005 		if (sc->sc_type != WM_T_82574) {
   6006 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6007 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6008 		} else
   6009 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6010 	}
   6011 
   6012 	/* Release any queued transmit buffers. */
   6013 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6014 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6015 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6016 		mutex_enter(txq->txq_lock);
   6017 		txq->txq_sending = false; /* ensure watchdog disabled */
   6018 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6019 			txs = &txq->txq_soft[i];
   6020 			if (txs->txs_mbuf != NULL) {
   6021 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6022 				m_freem(txs->txs_mbuf);
   6023 				txs->txs_mbuf = NULL;
   6024 			}
   6025 		}
   6026 		mutex_exit(txq->txq_lock);
   6027 	}
   6028 
   6029 	/* Mark the interface as down and cancel the watchdog timer. */
   6030 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6031 
   6032 	if (disable) {
   6033 		for (i = 0; i < sc->sc_nqueues; i++) {
   6034 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6035 			mutex_enter(rxq->rxq_lock);
   6036 			wm_rxdrain(rxq);
   6037 			mutex_exit(rxq->rxq_lock);
   6038 		}
   6039 	}
   6040 
   6041 #if 0 /* notyet */
   6042 	if (sc->sc_type >= WM_T_82544)
   6043 		CSR_WRITE(sc, WMREG_WUC, 0);
   6044 #endif
   6045 }
   6046 
   6047 static void
   6048 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6049 {
   6050 	struct mbuf *m;
   6051 	int i;
   6052 
   6053 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6054 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6055 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6056 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6057 		    m->m_data, m->m_len, m->m_flags);
   6058 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6059 	    i, i == 1 ? "" : "s");
   6060 }
   6061 
   6062 /*
   6063  * wm_82547_txfifo_stall:
   6064  *
   6065  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6066  *	reset the FIFO pointers, and restart packet transmission.
   6067  */
   6068 static void
   6069 wm_82547_txfifo_stall(void *arg)
   6070 {
   6071 	struct wm_softc *sc = arg;
   6072 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6073 
   6074 	mutex_enter(txq->txq_lock);
   6075 
   6076 	if (txq->txq_stopping)
   6077 		goto out;
   6078 
   6079 	if (txq->txq_fifo_stall) {
   6080 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6081 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6082 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6083 			/*
   6084 			 * Packets have drained.  Stop transmitter, reset
   6085 			 * FIFO pointers, restart transmitter, and kick
   6086 			 * the packet queue.
   6087 			 */
   6088 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6089 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6090 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6091 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6092 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6093 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6094 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6095 			CSR_WRITE_FLUSH(sc);
   6096 
   6097 			txq->txq_fifo_head = 0;
   6098 			txq->txq_fifo_stall = 0;
   6099 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6100 		} else {
   6101 			/*
   6102 			 * Still waiting for packets to drain; try again in
   6103 			 * another tick.
   6104 			 */
   6105 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6106 		}
   6107 	}
   6108 
   6109 out:
   6110 	mutex_exit(txq->txq_lock);
   6111 }
   6112 
   6113 /*
   6114  * wm_82547_txfifo_bugchk:
   6115  *
   6116  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6117  *	prevent enqueueing a packet that would wrap around the end
    6118  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6119  *
   6120  *	We do this by checking the amount of space before the end
   6121  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6122  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6123  *	the internal FIFO pointers to the beginning, and restart
   6124  *	transmission on the interface.
   6125  */
   6126 #define	WM_FIFO_HDR		0x10
   6127 #define	WM_82547_PAD_LEN	0x3e0
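
         /*
          * Illustrative numbers for the check below: a 1518-byte frame
          * rounds up to roundup(1518 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600
          * FIFO bytes, so the stall path is taken once 0x600 -
          * WM_82547_PAD_LEN = 0x220 bytes or less remain between
          * txq_fifo_head and the end of the FIFO.
          */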
   6128 static int
   6129 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6130 {
   6131 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6132 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6133 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6134 
   6135 	/* Just return if already stalled. */
   6136 	if (txq->txq_fifo_stall)
   6137 		return 1;
   6138 
   6139 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6140 		/* Stall only occurs in half-duplex mode. */
   6141 		goto send_packet;
   6142 	}
   6143 
   6144 	if (len >= WM_82547_PAD_LEN + space) {
   6145 		txq->txq_fifo_stall = 1;
   6146 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6147 		return 1;
   6148 	}
   6149 
   6150  send_packet:
   6151 	txq->txq_fifo_head += len;
   6152 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6153 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6154 
   6155 	return 0;
   6156 }
   6157 
   6158 static int
   6159 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6160 {
   6161 	int error;
   6162 
   6163 	/*
   6164 	 * Allocate the control data structures, and create and load the
   6165 	 * DMA map for it.
   6166 	 *
   6167 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6168 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6169 	 * both sets within the same 4G segment.
   6170 	 */
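         	/*
         	 * The 4G constraint is enforced below via the boundary
         	 * argument (0x100000000ULL) to bus_dmamem_alloc(), which
         	 * prevents the returned segment from crossing a 4GB address
         	 * boundary.
         	 */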
   6171 	if (sc->sc_type < WM_T_82544)
   6172 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6173 	else
   6174 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6175 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6176 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6177 	else
   6178 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6179 
   6180 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6181 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6182 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6183 		aprint_error_dev(sc->sc_dev,
   6184 		    "unable to allocate TX control data, error = %d\n",
   6185 		    error);
   6186 		goto fail_0;
   6187 	}
   6188 
   6189 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6190 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6191 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6192 		aprint_error_dev(sc->sc_dev,
   6193 		    "unable to map TX control data, error = %d\n", error);
   6194 		goto fail_1;
   6195 	}
   6196 
   6197 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6198 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6199 		aprint_error_dev(sc->sc_dev,
   6200 		    "unable to create TX control data DMA map, error = %d\n",
   6201 		    error);
   6202 		goto fail_2;
   6203 	}
   6204 
   6205 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6206 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6207 		aprint_error_dev(sc->sc_dev,
   6208 		    "unable to load TX control data DMA map, error = %d\n",
   6209 		    error);
   6210 		goto fail_3;
   6211 	}
   6212 
   6213 	return 0;
   6214 
   6215  fail_3:
   6216 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6217  fail_2:
   6218 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6219 	    WM_TXDESCS_SIZE(txq));
   6220  fail_1:
   6221 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6222  fail_0:
   6223 	return error;
   6224 }
   6225 
   6226 static void
   6227 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6228 {
   6229 
   6230 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6231 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6232 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6233 	    WM_TXDESCS_SIZE(txq));
   6234 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6235 }
   6236 
   6237 static int
   6238 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6239 {
   6240 	int error;
   6241 	size_t rxq_descs_size;
   6242 
   6243 	/*
   6244 	 * Allocate the control data structures, and create and load the
   6245 	 * DMA map for it.
   6246 	 *
   6247 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6248 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6249 	 * both sets within the same 4G segment.
   6250 	 */
   6251 	rxq->rxq_ndesc = WM_NRXDESC;
   6252 	if (sc->sc_type == WM_T_82574)
   6253 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6254 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6255 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6256 	else
   6257 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6258 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6259 
   6260 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6261 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6262 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6263 		aprint_error_dev(sc->sc_dev,
   6264 		    "unable to allocate RX control data, error = %d\n",
   6265 		    error);
   6266 		goto fail_0;
   6267 	}
   6268 
   6269 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6270 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6271 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6272 		aprint_error_dev(sc->sc_dev,
   6273 		    "unable to map RX control data, error = %d\n", error);
   6274 		goto fail_1;
   6275 	}
   6276 
   6277 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6278 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6279 		aprint_error_dev(sc->sc_dev,
   6280 		    "unable to create RX control data DMA map, error = %d\n",
   6281 		    error);
   6282 		goto fail_2;
   6283 	}
   6284 
   6285 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6286 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6287 		aprint_error_dev(sc->sc_dev,
   6288 		    "unable to load RX control data DMA map, error = %d\n",
   6289 		    error);
   6290 		goto fail_3;
   6291 	}
   6292 
   6293 	return 0;
   6294 
   6295  fail_3:
   6296 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6297  fail_2:
   6298 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6299 	    rxq_descs_size);
   6300  fail_1:
   6301 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6302  fail_0:
   6303 	return error;
   6304 }
   6305 
   6306 static void
   6307 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6308 {
   6309 
   6310 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6311 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6312 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6313 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6314 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6315 }
   6316 
   6317 
   6318 static int
   6319 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6320 {
   6321 	int i, error;
   6322 
   6323 	/* Create the transmit buffer DMA maps. */
   6324 	WM_TXQUEUELEN(txq) =
   6325 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6326 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6327 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6328 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6329 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6330 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6331 			aprint_error_dev(sc->sc_dev,
   6332 			    "unable to create Tx DMA map %d, error = %d\n",
   6333 			    i, error);
   6334 			goto fail;
   6335 		}
   6336 	}
   6337 
   6338 	return 0;
   6339 
   6340  fail:
   6341 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6342 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6343 			bus_dmamap_destroy(sc->sc_dmat,
   6344 			    txq->txq_soft[i].txs_dmamap);
   6345 	}
   6346 	return error;
   6347 }
   6348 
   6349 static void
   6350 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6351 {
   6352 	int i;
   6353 
   6354 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6355 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6356 			bus_dmamap_destroy(sc->sc_dmat,
   6357 			    txq->txq_soft[i].txs_dmamap);
   6358 	}
   6359 }
   6360 
   6361 static int
   6362 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6363 {
   6364 	int i, error;
   6365 
   6366 	/* Create the receive buffer DMA maps. */
   6367 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6368 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6369 			    MCLBYTES, 0, 0,
   6370 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6371 			aprint_error_dev(sc->sc_dev,
   6372 			    "unable to create Rx DMA map %d error = %d\n",
   6373 			    i, error);
   6374 			goto fail;
   6375 		}
   6376 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6377 	}
   6378 
   6379 	return 0;
   6380 
   6381  fail:
   6382 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6383 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6384 			bus_dmamap_destroy(sc->sc_dmat,
   6385 			    rxq->rxq_soft[i].rxs_dmamap);
   6386 	}
   6387 	return error;
   6388 }
   6389 
   6390 static void
   6391 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6392 {
   6393 	int i;
   6394 
   6395 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6396 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6397 			bus_dmamap_destroy(sc->sc_dmat,
   6398 			    rxq->rxq_soft[i].rxs_dmamap);
   6399 	}
   6400 }
   6401 
   6402 /*
    6403  * wm_alloc_txrx_queues:
   6404  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6405  */
   6406 static int
   6407 wm_alloc_txrx_queues(struct wm_softc *sc)
   6408 {
   6409 	int i, error, tx_done, rx_done;
   6410 
   6411 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6412 	    KM_SLEEP);
   6413 	if (sc->sc_queue == NULL) {
    6414 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6415 		error = ENOMEM;
   6416 		goto fail_0;
   6417 	}
   6418 
   6419 	/*
   6420 	 * For transmission
   6421 	 */
   6422 	error = 0;
   6423 	tx_done = 0;
   6424 	for (i = 0; i < sc->sc_nqueues; i++) {
   6425 #ifdef WM_EVENT_COUNTERS
   6426 		int j;
   6427 		const char *xname;
   6428 #endif
   6429 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6430 		txq->txq_sc = sc;
   6431 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6432 
   6433 		error = wm_alloc_tx_descs(sc, txq);
   6434 		if (error)
   6435 			break;
   6436 		error = wm_alloc_tx_buffer(sc, txq);
   6437 		if (error) {
   6438 			wm_free_tx_descs(sc, txq);
   6439 			break;
   6440 		}
   6441 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6442 		if (txq->txq_interq == NULL) {
   6443 			wm_free_tx_descs(sc, txq);
   6444 			wm_free_tx_buffer(sc, txq);
   6445 			error = ENOMEM;
   6446 			break;
   6447 		}
   6448 
   6449 #ifdef WM_EVENT_COUNTERS
   6450 		xname = device_xname(sc->sc_dev);
   6451 
   6452 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6453 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6454 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6455 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6456 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6457 
   6458 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6459 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6460 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6461 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6462 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6463 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6464 
   6465 		for (j = 0; j < WM_NTXSEGS; j++) {
    6466 			snprintf(txq->txq_txseg_evcnt_names[j],
    6467 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
    6468 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
    6469 			    EVCNT_TYPE_MISC, NULL, xname,
         			    txq->txq_txseg_evcnt_names[j]);
   6470 		}
   6471 
   6472 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6473 
   6474 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6475 #endif /* WM_EVENT_COUNTERS */
   6476 
   6477 		tx_done++;
   6478 	}
   6479 	if (error)
   6480 		goto fail_1;
   6481 
   6482 	/*
    6483 	 * For receive
   6484 	 */
   6485 	error = 0;
   6486 	rx_done = 0;
   6487 	for (i = 0; i < sc->sc_nqueues; i++) {
   6488 #ifdef WM_EVENT_COUNTERS
   6489 		const char *xname;
   6490 #endif
   6491 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6492 		rxq->rxq_sc = sc;
   6493 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6494 
   6495 		error = wm_alloc_rx_descs(sc, rxq);
   6496 		if (error)
   6497 			break;
   6498 
   6499 		error = wm_alloc_rx_buffer(sc, rxq);
   6500 		if (error) {
   6501 			wm_free_rx_descs(sc, rxq);
   6502 			break;
   6503 		}
   6504 
   6505 #ifdef WM_EVENT_COUNTERS
   6506 		xname = device_xname(sc->sc_dev);
   6507 
   6508 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6509 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxdefer, rxq, i, xname);
   6510 
   6511 		WM_Q_MISC_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6512 		WM_Q_MISC_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6513 #endif /* WM_EVENT_COUNTERS */
   6514 
   6515 		rx_done++;
   6516 	}
   6517 	if (error)
   6518 		goto fail_2;
   6519 
   6520 	return 0;
   6521 
   6522  fail_2:
   6523 	for (i = 0; i < rx_done; i++) {
   6524 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6525 		wm_free_rx_buffer(sc, rxq);
   6526 		wm_free_rx_descs(sc, rxq);
   6527 		if (rxq->rxq_lock)
   6528 			mutex_obj_free(rxq->rxq_lock);
   6529 	}
   6530  fail_1:
   6531 	for (i = 0; i < tx_done; i++) {
   6532 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6533 		pcq_destroy(txq->txq_interq);
   6534 		wm_free_tx_buffer(sc, txq);
   6535 		wm_free_tx_descs(sc, txq);
   6536 		if (txq->txq_lock)
   6537 			mutex_obj_free(txq->txq_lock);
   6538 	}
   6539 
   6540 	kmem_free(sc->sc_queue,
   6541 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6542  fail_0:
   6543 	return error;
   6544 }
   6545 
   6546 /*
    6547  * wm_free_txrx_queues:
   6548  *	Free {tx,rx}descs and {tx,rx} buffers
   6549  */
   6550 static void
   6551 wm_free_txrx_queues(struct wm_softc *sc)
   6552 {
   6553 	int i;
   6554 
   6555 	for (i = 0; i < sc->sc_nqueues; i++) {
   6556 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6557 
   6558 #ifdef WM_EVENT_COUNTERS
   6559 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6560 		WM_Q_EVCNT_DETACH(rxq, rxdefer, rxq, i);
   6561 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6562 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6563 #endif /* WM_EVENT_COUNTERS */
   6564 
   6565 		wm_free_rx_buffer(sc, rxq);
   6566 		wm_free_rx_descs(sc, rxq);
   6567 		if (rxq->rxq_lock)
   6568 			mutex_obj_free(rxq->rxq_lock);
   6569 	}
   6570 
   6571 	for (i = 0; i < sc->sc_nqueues; i++) {
   6572 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6573 		struct mbuf *m;
   6574 #ifdef WM_EVENT_COUNTERS
   6575 		int j;
   6576 
   6577 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6578 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6579 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6580 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6581 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6582 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6583 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6584 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6585 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6586 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6587 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6588 
   6589 		for (j = 0; j < WM_NTXSEGS; j++)
   6590 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6591 
   6592 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6593 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6594 #endif /* WM_EVENT_COUNTERS */
   6595 
   6596 		/* drain txq_interq */
   6597 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6598 			m_freem(m);
   6599 		pcq_destroy(txq->txq_interq);
   6600 
   6601 		wm_free_tx_buffer(sc, txq);
   6602 		wm_free_tx_descs(sc, txq);
   6603 		if (txq->txq_lock)
   6604 			mutex_obj_free(txq->txq_lock);
   6605 	}
   6606 
   6607 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6608 }
   6609 
   6610 static void
   6611 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6612 {
   6613 
   6614 	KASSERT(mutex_owned(txq->txq_lock));
   6615 
   6616 	/* Initialize the transmit descriptor ring. */
   6617 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6618 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6619 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6620 	txq->txq_free = WM_NTXDESC(txq);
   6621 	txq->txq_next = 0;
   6622 }
   6623 
   6624 static void
   6625 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6626     struct wm_txqueue *txq)
   6627 {
   6628 
   6629 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6630 		device_xname(sc->sc_dev), __func__));
   6631 	KASSERT(mutex_owned(txq->txq_lock));
   6632 
   6633 	if (sc->sc_type < WM_T_82543) {
   6634 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6635 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6636 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6637 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6638 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6639 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6640 	} else {
   6641 		int qid = wmq->wmq_id;
   6642 
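         		/*
         		 * TDBAH/TDBAL take the high and low 32 bits of the
         		 * descriptor ring's 64-bit base address.
         		 */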
   6643 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6644 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6645 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6646 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6647 
   6648 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6649 			/*
   6650 			 * Don't write TDT before TCTL.EN is set.
    6651 			 * See the datasheet.
   6652 			 */
   6653 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6654 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6655 			    | TXDCTL_WTHRESH(0));
   6656 		else {
   6657 			/* XXX should update with AIM? */
   6658 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6659 			if (sc->sc_type >= WM_T_82540) {
    6660 				/* Should be the same value as TIDV. */
   6661 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6662 			}
   6663 
   6664 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6665 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6666 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6667 		}
   6668 	}
   6669 }
   6670 
   6671 static void
   6672 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6673 {
   6674 	int i;
   6675 
   6676 	KASSERT(mutex_owned(txq->txq_lock));
   6677 
   6678 	/* Initialize the transmit job descriptors. */
   6679 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6680 		txq->txq_soft[i].txs_mbuf = NULL;
   6681 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6682 	txq->txq_snext = 0;
   6683 	txq->txq_sdirty = 0;
   6684 }
   6685 
   6686 static void
   6687 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6688     struct wm_txqueue *txq)
   6689 {
   6690 
   6691 	KASSERT(mutex_owned(txq->txq_lock));
   6692 
   6693 	/*
   6694 	 * Set up some register offsets that are different between
   6695 	 * the i82542 and the i82543 and later chips.
   6696 	 */
   6697 	if (sc->sc_type < WM_T_82543)
   6698 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6699 	else
   6700 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6701 
   6702 	wm_init_tx_descs(sc, txq);
   6703 	wm_init_tx_regs(sc, wmq, txq);
   6704 	wm_init_tx_buffer(sc, txq);
   6705 
   6706 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6707 	txq->txq_sending = false;
   6708 }
   6709 
   6710 static void
   6711 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6712     struct wm_rxqueue *rxq)
   6713 {
   6714 
   6715 	KASSERT(mutex_owned(rxq->rxq_lock));
   6716 
   6717 	/*
   6718 	 * Initialize the receive descriptor and receive job
   6719 	 * descriptor rings.
   6720 	 */
   6721 	if (sc->sc_type < WM_T_82543) {
   6722 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6723 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6724 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6725 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6726 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6727 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6728 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6729 
   6730 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6731 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6732 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6733 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6734 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6735 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6736 	} else {
   6737 		int qid = wmq->wmq_id;
   6738 
   6739 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6740 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6741 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6742 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6743 
   6744 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
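         			/*
         			 * SRRCTL expresses the packet buffer size in units
         			 * of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so
         			 * MCLBYTES must be a multiple of that unit.
         			 */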
   6745 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    6746 				panic("%s: MCLBYTES %d unsupported for 82575 "
         				    "or higher\n", __func__, MCLBYTES);
   6747 
    6748 			/*
         			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
         			 * supported.
         			 */
    6749 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
         			    SRRCTL_DESCTYPE_ADV_ONEBUF
    6750 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6751 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6752 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6753 			    | RXDCTL_WTHRESH(1));
   6754 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6755 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6756 		} else {
   6757 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6758 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6759 			/* XXX should update with AIM? */
   6760 			CSR_WRITE(sc, WMREG_RDTR,
   6761 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    6762 			/* MUST be the same value as RDTR. */
   6763 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6764 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6765 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6766 		}
   6767 	}
   6768 }
   6769 
   6770 static int
   6771 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6772 {
   6773 	struct wm_rxsoft *rxs;
   6774 	int error, i;
   6775 
   6776 	KASSERT(mutex_owned(rxq->rxq_lock));
   6777 
   6778 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6779 		rxs = &rxq->rxq_soft[i];
   6780 		if (rxs->rxs_mbuf == NULL) {
   6781 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6782 				log(LOG_ERR, "%s: unable to allocate or map "
   6783 				    "rx buffer %d, error = %d\n",
   6784 				    device_xname(sc->sc_dev), i, error);
   6785 				/*
   6786 				 * XXX Should attempt to run with fewer receive
   6787 				 * XXX buffers instead of just failing.
   6788 				 */
   6789 				wm_rxdrain(rxq);
   6790 				return ENOMEM;
   6791 			}
   6792 		} else {
   6793 			/*
   6794 			 * For 82575 and 82576, the RX descriptors must be
   6795 			 * initialized after the setting of RCTL.EN in
   6796 			 * wm_set_filter()
   6797 			 */
   6798 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6799 				wm_init_rxdesc(rxq, i);
   6800 		}
   6801 	}
   6802 	rxq->rxq_ptr = 0;
   6803 	rxq->rxq_discard = 0;
   6804 	WM_RXCHAIN_RESET(rxq);
   6805 
   6806 	return 0;
   6807 }
   6808 
   6809 static int
   6810 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6811     struct wm_rxqueue *rxq)
   6812 {
   6813 
   6814 	KASSERT(mutex_owned(rxq->rxq_lock));
   6815 
   6816 	/*
   6817 	 * Set up some register offsets that are different between
   6818 	 * the i82542 and the i82543 and later chips.
   6819 	 */
   6820 	if (sc->sc_type < WM_T_82543)
   6821 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6822 	else
   6823 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6824 
   6825 	wm_init_rx_regs(sc, wmq, rxq);
   6826 	return wm_init_rx_buffer(sc, rxq);
   6827 }
   6828 
   6829 /*
    6830  * wm_init_txrx_queues:
   6831  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6832  */
   6833 static int
   6834 wm_init_txrx_queues(struct wm_softc *sc)
   6835 {
   6836 	int i, error = 0;
   6837 
   6838 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6839 		device_xname(sc->sc_dev), __func__));
   6840 
   6841 	for (i = 0; i < sc->sc_nqueues; i++) {
   6842 		struct wm_queue *wmq = &sc->sc_queue[i];
   6843 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6844 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6845 
   6846 		/*
   6847 		 * TODO
    6848 		 * Currently, we use a constant value instead of AIM
    6849 		 * (adaptive interrupt moderation). Furthermore, the
    6850 		 * interrupt interval of the multiqueue path, which uses
    6851 		 * polling mode, is less than the default value.
         		 * More tuning, and AIM, are required.
   6852 		 */
   6853 		if (wm_is_using_multiqueue(sc))
   6854 			wmq->wmq_itr = 50;
   6855 		else
   6856 			wmq->wmq_itr = sc->sc_itr_init;
   6857 		wmq->wmq_set_itr = true;
   6858 
   6859 		mutex_enter(txq->txq_lock);
   6860 		wm_init_tx_queue(sc, wmq, txq);
   6861 		mutex_exit(txq->txq_lock);
   6862 
   6863 		mutex_enter(rxq->rxq_lock);
   6864 		error = wm_init_rx_queue(sc, wmq, rxq);
   6865 		mutex_exit(rxq->rxq_lock);
   6866 		if (error)
   6867 			break;
   6868 	}
   6869 
   6870 	return error;
   6871 }
   6872 
   6873 /*
   6874  * wm_tx_offload:
   6875  *
   6876  *	Set up TCP/IP checksumming parameters for the
   6877  *	specified packet.
   6878  */
   6879 static int
   6880 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6881     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6882 {
   6883 	struct mbuf *m0 = txs->txs_mbuf;
   6884 	struct livengood_tcpip_ctxdesc *t;
   6885 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6886 	uint32_t ipcse;
   6887 	struct ether_header *eh;
   6888 	int offset, iphl;
   6889 	uint8_t fields;
   6890 
   6891 	/*
   6892 	 * XXX It would be nice if the mbuf pkthdr had offset
   6893 	 * fields for the protocol headers.
   6894 	 */
   6895 
   6896 	eh = mtod(m0, struct ether_header *);
   6897 	switch (htons(eh->ether_type)) {
   6898 	case ETHERTYPE_IP:
   6899 	case ETHERTYPE_IPV6:
   6900 		offset = ETHER_HDR_LEN;
   6901 		break;
   6902 
   6903 	case ETHERTYPE_VLAN:
   6904 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6905 		break;
   6906 
   6907 	default:
   6908 		/*
   6909 		 * Don't support this protocol or encapsulation.
   6910 		 */
   6911 		*fieldsp = 0;
   6912 		*cmdp = 0;
   6913 		return 0;
   6914 	}
   6915 
   6916 	if ((m0->m_pkthdr.csum_flags &
   6917 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6918 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6919 	} else {
   6920 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6921 	}
   6922 	ipcse = offset + iphl - 1;
   6923 
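         	/*
         	 * "cmd" accumulates bits for the data descriptors (DTYP_D) and
         	 * is returned via *cmdp; "cmdlen" is for the context descriptor
         	 * (DTYP_C) filled in at the end of this function.
         	 */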
   6924 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6925 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6926 	seg = 0;
   6927 	fields = 0;
   6928 
   6929 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6930 		int hlen = offset + iphl;
   6931 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6932 
   6933 		if (__predict_false(m0->m_len <
   6934 				    (hlen + sizeof(struct tcphdr)))) {
   6935 			/*
   6936 			 * TCP/IP headers are not in the first mbuf; we need
   6937 			 * to do this the slow and painful way.  Let's just
   6938 			 * hope this doesn't happen very often.
   6939 			 */
   6940 			struct tcphdr th;
   6941 
   6942 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6943 
   6944 			m_copydata(m0, hlen, sizeof(th), &th);
   6945 			if (v4) {
   6946 				struct ip ip;
   6947 
   6948 				m_copydata(m0, offset, sizeof(ip), &ip);
   6949 				ip.ip_len = 0;
   6950 				m_copyback(m0,
   6951 				    offset + offsetof(struct ip, ip_len),
   6952 				    sizeof(ip.ip_len), &ip.ip_len);
   6953 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6954 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6955 			} else {
   6956 				struct ip6_hdr ip6;
   6957 
   6958 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6959 				ip6.ip6_plen = 0;
   6960 				m_copyback(m0,
   6961 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6962 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6963 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6964 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6965 			}
   6966 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6967 			    sizeof(th.th_sum), &th.th_sum);
   6968 
   6969 			hlen += th.th_off << 2;
   6970 		} else {
   6971 			/*
   6972 			 * TCP/IP headers are in the first mbuf; we can do
   6973 			 * this the easy way.
   6974 			 */
   6975 			struct tcphdr *th;
   6976 
   6977 			if (v4) {
   6978 				struct ip *ip =
   6979 				    (void *)(mtod(m0, char *) + offset);
   6980 				th = (void *)(mtod(m0, char *) + hlen);
   6981 
   6982 				ip->ip_len = 0;
   6983 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6984 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6985 			} else {
   6986 				struct ip6_hdr *ip6 =
   6987 				    (void *)(mtod(m0, char *) + offset);
   6988 				th = (void *)(mtod(m0, char *) + hlen);
   6989 
   6990 				ip6->ip6_plen = 0;
   6991 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6992 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6993 			}
   6994 			hlen += th->th_off << 2;
   6995 		}
   6996 
   6997 		if (v4) {
   6998 			WM_Q_EVCNT_INCR(txq, txtso);
   6999 			cmdlen |= WTX_TCPIP_CMD_IP;
   7000 		} else {
   7001 			WM_Q_EVCNT_INCR(txq, txtso6);
   7002 			ipcse = 0;
   7003 		}
   7004 		cmd |= WTX_TCPIP_CMD_TSE;
   7005 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7006 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7007 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7008 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7009 	}
   7010 
   7011 	/*
   7012 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7013 	 * offload feature, if we load the context descriptor, we
   7014 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7015 	 */
   7016 
   7017 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7018 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7019 	    WTX_TCPIP_IPCSE(ipcse);
   7020 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7021 		WM_Q_EVCNT_INCR(txq, txipsum);
   7022 		fields |= WTX_IXSM;
   7023 	}
   7024 
   7025 	offset += iphl;
   7026 
   7027 	if (m0->m_pkthdr.csum_flags &
   7028 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7029 		WM_Q_EVCNT_INCR(txq, txtusum);
   7030 		fields |= WTX_TXSM;
   7031 		tucs = WTX_TCPIP_TUCSS(offset) |
   7032 		    WTX_TCPIP_TUCSO(offset +
   7033 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7034 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7035 	} else if ((m0->m_pkthdr.csum_flags &
   7036 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7037 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7038 		fields |= WTX_TXSM;
   7039 		tucs = WTX_TCPIP_TUCSS(offset) |
   7040 		    WTX_TCPIP_TUCSO(offset +
   7041 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7042 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7043 	} else {
   7044 		/* Just initialize it to a valid TCP context. */
   7045 		tucs = WTX_TCPIP_TUCSS(offset) |
   7046 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7047 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7048 	}
   7049 
   7050 	/*
    7051 	 * We don't have to write a context descriptor for every packet,
    7052 	 * except on the 82574: there, a context descriptor must be written
    7053 	 * for every packet when two descriptor queues are used.
    7054 	 * Writing a context descriptor for every packet adds overhead,
    7055 	 * but it does not cause problems.
   7056 	 */
   7057 	/* Fill in the context descriptor. */
   7058 	t = (struct livengood_tcpip_ctxdesc *)
   7059 	    &txq->txq_descs[txq->txq_next];
   7060 	t->tcpip_ipcs = htole32(ipcs);
   7061 	t->tcpip_tucs = htole32(tucs);
   7062 	t->tcpip_cmdlen = htole32(cmdlen);
   7063 	t->tcpip_seg = htole32(seg);
   7064 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7065 
   7066 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7067 	txs->txs_ndesc++;
   7068 
   7069 	*cmdp = cmd;
   7070 	*fieldsp = fields;
   7071 
   7072 	return 0;
   7073 }
   7074 
   7075 static inline int
   7076 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7077 {
   7078 	struct wm_softc *sc = ifp->if_softc;
   7079 	u_int cpuid = cpu_index(curcpu());
   7080 
   7081 	/*
    7082 	 * Currently, a simple distribution strategy.
    7083 	 * TODO:
    7084 	 * Distribute by flowid (RSS hash value).
   7085 	 */
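         	/*
         	 * Adding ncpu keeps the dividend non-negative when
         	 * cpuid < sc_affinity_offset; for example, with an offset of 2,
         	 * 4 CPUs and 2 queues, CPU 0 maps to queue (0 + 4 - 2) % 2 = 0.
         	 */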
    7086 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7087 }
   7088 
   7089 /*
   7090  * wm_start:		[ifnet interface function]
   7091  *
   7092  *	Start packet transmission on the interface.
   7093  */
   7094 static void
   7095 wm_start(struct ifnet *ifp)
   7096 {
   7097 	struct wm_softc *sc = ifp->if_softc;
   7098 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7099 
   7100 #ifdef WM_MPSAFE
   7101 	KASSERT(if_is_mpsafe(ifp));
   7102 #endif
   7103 	/*
   7104 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7105 	 */
   7106 
   7107 	mutex_enter(txq->txq_lock);
   7108 	if (!txq->txq_stopping)
   7109 		wm_start_locked(ifp);
   7110 	mutex_exit(txq->txq_lock);
   7111 }
   7112 
   7113 static void
   7114 wm_start_locked(struct ifnet *ifp)
   7115 {
   7116 	struct wm_softc *sc = ifp->if_softc;
   7117 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7118 
   7119 	wm_send_common_locked(ifp, txq, false);
   7120 }
   7121 
   7122 static int
   7123 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7124 {
   7125 	int qid;
   7126 	struct wm_softc *sc = ifp->if_softc;
   7127 	struct wm_txqueue *txq;
   7128 
   7129 	qid = wm_select_txqueue(ifp, m);
   7130 	txq = &sc->sc_queue[qid].wmq_txq;
   7131 
   7132 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7133 		m_freem(m);
   7134 		WM_Q_EVCNT_INCR(txq, txdrop);
   7135 		return ENOBUFS;
   7136 	}
   7137 
   7138 	/*
   7139 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7140 	 */
   7141 	ifp->if_obytes += m->m_pkthdr.len;
   7142 	if (m->m_flags & M_MCAST)
   7143 		ifp->if_omcasts++;
   7144 
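         	/*
         	 * If mutex_tryenter() fails here, the packet just enqueued is
         	 * still dequeued later by the interrupt handler or the deferred
         	 * start softint; see the corresponding comment in
         	 * wm_nq_transmit() below.
         	 */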
   7145 	if (mutex_tryenter(txq->txq_lock)) {
   7146 		if (!txq->txq_stopping)
   7147 			wm_transmit_locked(ifp, txq);
   7148 		mutex_exit(txq->txq_lock);
   7149 	}
   7150 
   7151 	return 0;
   7152 }
   7153 
   7154 static void
   7155 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7156 {
   7157 
   7158 	wm_send_common_locked(ifp, txq, true);
   7159 }
   7160 
   7161 static void
   7162 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7163     bool is_transmit)
   7164 {
   7165 	struct wm_softc *sc = ifp->if_softc;
   7166 	struct mbuf *m0;
   7167 	struct wm_txsoft *txs;
   7168 	bus_dmamap_t dmamap;
   7169 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7170 	bus_addr_t curaddr;
   7171 	bus_size_t seglen, curlen;
   7172 	uint32_t cksumcmd;
   7173 	uint8_t cksumfields;
   7174 
   7175 	KASSERT(mutex_owned(txq->txq_lock));
   7176 
   7177 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7178 		return;
   7179 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7180 		return;
   7181 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7182 		return;
   7183 
   7184 	/* Remember the previous number of free descriptors. */
   7185 	ofree = txq->txq_free;
   7186 
   7187 	/*
   7188 	 * Loop through the send queue, setting up transmit descriptors
   7189 	 * until we drain the queue, or use up all available transmit
   7190 	 * descriptors.
   7191 	 */
   7192 	for (;;) {
   7193 		m0 = NULL;
   7194 
   7195 		/* Get a work queue entry. */
   7196 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7197 			wm_txeof(txq, UINT_MAX);
   7198 			if (txq->txq_sfree == 0) {
   7199 				DPRINTF(WM_DEBUG_TX,
   7200 				    ("%s: TX: no free job descriptors\n",
   7201 					device_xname(sc->sc_dev)));
   7202 				WM_Q_EVCNT_INCR(txq, txsstall);
   7203 				break;
   7204 			}
   7205 		}
   7206 
   7207 		/* Grab a packet off the queue. */
   7208 		if (is_transmit)
   7209 			m0 = pcq_get(txq->txq_interq);
   7210 		else
   7211 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7212 		if (m0 == NULL)
   7213 			break;
   7214 
   7215 		DPRINTF(WM_DEBUG_TX,
   7216 		    ("%s: TX: have packet to transmit: %p\n",
   7217 		    device_xname(sc->sc_dev), m0));
   7218 
   7219 		txs = &txq->txq_soft[txq->txq_snext];
   7220 		dmamap = txs->txs_dmamap;
   7221 
   7222 		use_tso = (m0->m_pkthdr.csum_flags &
   7223 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7224 
   7225 		/*
   7226 		 * So says the Linux driver:
   7227 		 * The controller does a simple calculation to make sure
   7228 		 * there is enough room in the FIFO before initiating the
   7229 		 * DMA for each buffer.  The calc is:
   7230 		 *	4 = ceil(buffer len / MSS)
   7231 		 * To make sure we don't overrun the FIFO, adjust the max
   7232 		 * buffer len if the MSS drops.
   7233 		 */
   7234 		dmamap->dm_maxsegsz =
   7235 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7236 		    ? m0->m_pkthdr.segsz << 2
   7237 		    : WTX_MAX_LEN;
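         		/*
         		 * For example, with an MSS of 1448 the per-segment limit
         		 * becomes 1448 << 2 = 5792 bytes, so one DMA segment never
         		 * spans more than four MSS-sized chunks.
         		 */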
   7238 
   7239 		/*
   7240 		 * Load the DMA map.  If this fails, the packet either
   7241 		 * didn't fit in the allotted number of segments, or we
   7242 		 * were short on resources.  For the too-many-segments
   7243 		 * case, we simply report an error and drop the packet,
   7244 		 * since we can't sanely copy a jumbo packet to a single
   7245 		 * buffer.
   7246 		 */
   7247 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7248 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7249 		if (error) {
   7250 			if (error == EFBIG) {
   7251 				WM_Q_EVCNT_INCR(txq, txdrop);
   7252 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7253 				    "DMA segments, dropping...\n",
   7254 				    device_xname(sc->sc_dev));
   7255 				wm_dump_mbuf_chain(sc, m0);
   7256 				m_freem(m0);
   7257 				continue;
   7258 			}
   7259 			/*  Short on resources, just stop for now. */
   7260 			DPRINTF(WM_DEBUG_TX,
   7261 			    ("%s: TX: dmamap load failed: %d\n",
   7262 			    device_xname(sc->sc_dev), error));
   7263 			break;
   7264 		}
   7265 
   7266 		segs_needed = dmamap->dm_nsegs;
   7267 		if (use_tso) {
   7268 			/* For sentinel descriptor; see below. */
   7269 			segs_needed++;
   7270 		}
   7271 
   7272 		/*
   7273 		 * Ensure we have enough descriptors free to describe
   7274 		 * the packet.  Note, we always reserve one descriptor
   7275 		 * at the end of the ring due to the semantics of the
   7276 		 * TDT register, plus one more in the event we need
   7277 		 * to load offload context.
   7278 		 */
   7279 		if (segs_needed > txq->txq_free - 2) {
   7280 			/*
   7281 			 * Not enough free descriptors to transmit this
   7282 			 * packet.  We haven't committed anything yet,
   7283 			 * so just unload the DMA map, put the packet
    7284 			 * back on the queue, and punt.  Notify the upper
   7285 			 * layer that there are no more slots left.
   7286 			 */
   7287 			DPRINTF(WM_DEBUG_TX,
   7288 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7289 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7290 			    segs_needed, txq->txq_free - 1));
   7291 			if (!is_transmit)
   7292 				ifp->if_flags |= IFF_OACTIVE;
   7293 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7294 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7295 			WM_Q_EVCNT_INCR(txq, txdstall);
   7296 			break;
   7297 		}
   7298 
   7299 		/*
   7300 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7301 		 * once we know we can transmit the packet, since we
   7302 		 * do some internal FIFO space accounting here.
   7303 		 */
   7304 		if (sc->sc_type == WM_T_82547 &&
   7305 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7306 			DPRINTF(WM_DEBUG_TX,
   7307 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7308 			    device_xname(sc->sc_dev)));
   7309 			if (!is_transmit)
   7310 				ifp->if_flags |= IFF_OACTIVE;
   7311 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7312 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7313 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7314 			break;
   7315 		}
   7316 
   7317 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7318 
   7319 		DPRINTF(WM_DEBUG_TX,
   7320 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7321 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7322 
   7323 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7324 
   7325 		/*
   7326 		 * Store a pointer to the packet so that we can free it
   7327 		 * later.
   7328 		 *
   7329 		 * Initially, we consider the number of descriptors the
   7330 		 * packet uses the number of DMA segments.  This may be
   7331 		 * incremented by 1 if we do checksum offload (a descriptor
   7332 		 * is used to set the checksum context).
   7333 		 */
   7334 		txs->txs_mbuf = m0;
   7335 		txs->txs_firstdesc = txq->txq_next;
   7336 		txs->txs_ndesc = segs_needed;
   7337 
   7338 		/* Set up offload parameters for this packet. */
   7339 		if (m0->m_pkthdr.csum_flags &
   7340 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7341 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7342 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7343 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7344 					  &cksumfields) != 0) {
   7345 				/* Error message already displayed. */
   7346 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7347 				continue;
   7348 			}
   7349 		} else {
   7350 			cksumcmd = 0;
   7351 			cksumfields = 0;
   7352 		}
   7353 
   7354 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7355 
   7356 		/* Sync the DMA map. */
   7357 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7358 		    BUS_DMASYNC_PREWRITE);
   7359 
   7360 		/* Initialize the transmit descriptor. */
   7361 		for (nexttx = txq->txq_next, seg = 0;
   7362 		     seg < dmamap->dm_nsegs; seg++) {
   7363 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7364 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7365 			     seglen != 0;
   7366 			     curaddr += curlen, seglen -= curlen,
   7367 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7368 				curlen = seglen;
   7369 
   7370 				/*
   7371 				 * So says the Linux driver:
   7372 				 * Work around for premature descriptor
   7373 				 * write-backs in TSO mode.  Append a
   7374 				 * 4-byte sentinel descriptor.
   7375 				 */
   7376 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7377 				    curlen > 8)
   7378 					curlen -= 4;
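         				/*
         				 * Shortening this chunk by 4 bytes leaves
         				 * seglen == 4 after this pass, so the loop
         				 * runs once more and emits the 4-byte
         				 * sentinel descriptor counted in
         				 * segs_needed above.
         				 */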
   7379 
   7380 				wm_set_dma_addr(
   7381 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7382 				txq->txq_descs[nexttx].wtx_cmdlen
   7383 				    = htole32(cksumcmd | curlen);
   7384 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7385 				    = 0;
   7386 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7387 				    = cksumfields;
    7388 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7389 				lasttx = nexttx;
   7390 
   7391 				DPRINTF(WM_DEBUG_TX,
   7392 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7393 				     "len %#04zx\n",
   7394 				    device_xname(sc->sc_dev), nexttx,
   7395 				    (uint64_t)curaddr, curlen));
   7396 			}
   7397 		}
   7398 
   7399 		KASSERT(lasttx != -1);
   7400 
   7401 		/*
   7402 		 * Set up the command byte on the last descriptor of
   7403 		 * the packet.  If we're in the interrupt delay window,
   7404 		 * delay the interrupt.
   7405 		 */
   7406 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7407 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7408 
   7409 		/*
   7410 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7411 		 * up the descriptor to encapsulate the packet for us.
   7412 		 *
   7413 		 * This is only valid on the last descriptor of the packet.
   7414 		 */
   7415 		if (vlan_has_tag(m0)) {
   7416 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7417 			    htole32(WTX_CMD_VLE);
   7418 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7419 			    = htole16(vlan_get_tag(m0));
   7420 		}
   7421 
   7422 		txs->txs_lastdesc = lasttx;
   7423 
   7424 		DPRINTF(WM_DEBUG_TX,
   7425 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7426 		    device_xname(sc->sc_dev),
   7427 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7428 
   7429 		/* Sync the descriptors we're using. */
   7430 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7431 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7432 
   7433 		/* Give the packet to the chip. */
   7434 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7435 
   7436 		DPRINTF(WM_DEBUG_TX,
   7437 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7438 
   7439 		DPRINTF(WM_DEBUG_TX,
   7440 		    ("%s: TX: finished transmitting packet, job %d\n",
   7441 		    device_xname(sc->sc_dev), txq->txq_snext));
   7442 
   7443 		/* Advance the tx pointer. */
   7444 		txq->txq_free -= txs->txs_ndesc;
   7445 		txq->txq_next = nexttx;
   7446 
   7447 		txq->txq_sfree--;
   7448 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7449 
   7450 		/* Pass the packet to any BPF listeners. */
   7451 		bpf_mtap(ifp, m0);
   7452 	}
   7453 
   7454 	if (m0 != NULL) {
   7455 		if (!is_transmit)
   7456 			ifp->if_flags |= IFF_OACTIVE;
   7457 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7458 		WM_Q_EVCNT_INCR(txq, txdrop);
   7459 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7460 			__func__));
   7461 		m_freem(m0);
   7462 	}
   7463 
   7464 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7465 		/* No more slots; notify upper layer. */
   7466 		if (!is_transmit)
   7467 			ifp->if_flags |= IFF_OACTIVE;
   7468 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7469 	}
   7470 
   7471 	if (txq->txq_free != ofree) {
   7472 		/* Set a watchdog timer in case the chip flakes out. */
   7473 		txq->txq_lastsent = time_uptime;
   7474 		txq->txq_sending = true;
   7475 	}
   7476 }
   7477 
   7478 /*
   7479  * wm_nq_tx_offload:
   7480  *
   7481  *	Set up TCP/IP checksumming parameters for the
   7482  *	specified packet, for NEWQUEUE devices
   7483  */
   7484 static int
   7485 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7486     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7487 {
   7488 	struct mbuf *m0 = txs->txs_mbuf;
   7489 	uint32_t vl_len, mssidx, cmdc;
   7490 	struct ether_header *eh;
   7491 	int offset, iphl;
   7492 
   7493 	/*
   7494 	 * XXX It would be nice if the mbuf pkthdr had offset
   7495 	 * fields for the protocol headers.
   7496 	 */
   7497 	*cmdlenp = 0;
   7498 	*fieldsp = 0;
   7499 
   7500 	eh = mtod(m0, struct ether_header *);
   7501 	switch (htons(eh->ether_type)) {
   7502 	case ETHERTYPE_IP:
   7503 	case ETHERTYPE_IPV6:
   7504 		offset = ETHER_HDR_LEN;
   7505 		break;
   7506 
   7507 	case ETHERTYPE_VLAN:
   7508 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7509 		break;
   7510 
   7511 	default:
   7512 		/* Don't support this protocol or encapsulation. */
   7513 		*do_csum = false;
   7514 		return 0;
   7515 	}
   7516 	*do_csum = true;
   7517 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7518 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7519 
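         	/*
         	 * vl_len packs the MAC header length, IP header length and
         	 * VLAN tag into the first word of the context descriptor.
         	 */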
   7520 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7521 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7522 
   7523 	if ((m0->m_pkthdr.csum_flags &
   7524 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7525 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7526 	} else {
   7527 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7528 	}
   7529 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7530 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7531 
   7532 	if (vlan_has_tag(m0)) {
   7533 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7534 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7535 		*cmdlenp |= NQTX_CMD_VLE;
   7536 	}
   7537 
   7538 	mssidx = 0;
   7539 
   7540 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7541 		int hlen = offset + iphl;
   7542 		int tcp_hlen;
   7543 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7544 
   7545 		if (__predict_false(m0->m_len <
   7546 				    (hlen + sizeof(struct tcphdr)))) {
   7547 			/*
   7548 			 * TCP/IP headers are not in the first mbuf; we need
   7549 			 * to do this the slow and painful way.  Let's just
   7550 			 * hope this doesn't happen very often.
   7551 			 */
   7552 			struct tcphdr th;
   7553 
   7554 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7555 
   7556 			m_copydata(m0, hlen, sizeof(th), &th);
   7557 			if (v4) {
   7558 				struct ip ip;
   7559 
   7560 				m_copydata(m0, offset, sizeof(ip), &ip);
   7561 				ip.ip_len = 0;
   7562 				m_copyback(m0,
   7563 				    offset + offsetof(struct ip, ip_len),
   7564 				    sizeof(ip.ip_len), &ip.ip_len);
   7565 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7566 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7567 			} else {
   7568 				struct ip6_hdr ip6;
   7569 
   7570 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7571 				ip6.ip6_plen = 0;
   7572 				m_copyback(m0,
   7573 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7574 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7575 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7576 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7577 			}
   7578 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7579 			    sizeof(th.th_sum), &th.th_sum);
   7580 
   7581 			tcp_hlen = th.th_off << 2;
   7582 		} else {
   7583 			/*
   7584 			 * TCP/IP headers are in the first mbuf; we can do
   7585 			 * this the easy way.
   7586 			 */
   7587 			struct tcphdr *th;
   7588 
   7589 			if (v4) {
   7590 				struct ip *ip =
   7591 				    (void *)(mtod(m0, char *) + offset);
   7592 				th = (void *)(mtod(m0, char *) + hlen);
   7593 
   7594 				ip->ip_len = 0;
   7595 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7596 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7597 			} else {
   7598 				struct ip6_hdr *ip6 =
   7599 				    (void *)(mtod(m0, char *) + offset);
   7600 				th = (void *)(mtod(m0, char *) + hlen);
   7601 
   7602 				ip6->ip6_plen = 0;
   7603 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7604 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7605 			}
   7606 			tcp_hlen = th->th_off << 2;
   7607 		}
   7608 		hlen += tcp_hlen;
   7609 		*cmdlenp |= NQTX_CMD_TSE;
   7610 
   7611 		if (v4) {
   7612 			WM_Q_EVCNT_INCR(txq, txtso);
   7613 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7614 		} else {
   7615 			WM_Q_EVCNT_INCR(txq, txtso6);
   7616 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7617 		}
    7618 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    7619 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7620 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7621 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7622 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7623 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7624 	} else {
   7625 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7626 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7627 	}
   7628 
   7629 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7630 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7631 		cmdc |= NQTXC_CMD_IP4;
   7632 	}
   7633 
   7634 	if (m0->m_pkthdr.csum_flags &
   7635 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7636 		WM_Q_EVCNT_INCR(txq, txtusum);
   7637 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7638 			cmdc |= NQTXC_CMD_TCP;
   7639 		} else {
   7640 			cmdc |= NQTXC_CMD_UDP;
   7641 		}
   7642 		cmdc |= NQTXC_CMD_IP4;
   7643 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7644 	}
   7645 	if (m0->m_pkthdr.csum_flags &
   7646 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7647 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7648 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7649 			cmdc |= NQTXC_CMD_TCP;
   7650 		} else {
   7651 			cmdc |= NQTXC_CMD_UDP;
   7652 		}
   7653 		cmdc |= NQTXC_CMD_IP6;
   7654 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7655 	}
   7656 
   7657 	/*
    7658 	 * We don't have to write a context descriptor for every packet on
    7659 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7660 	 * I210 and I211. It is enough to write it once per Tx queue on
    7661 	 * these controllers.
    7662 	 * Writing a context descriptor for every packet adds overhead,
    7663 	 * but it does not cause problems.
   7664 	 */
   7665 	/* Fill in the context descriptor. */
   7666 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7667 	    htole32(vl_len);
   7668 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7669 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7670 	    htole32(cmdc);
   7671 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7672 	    htole32(mssidx);
   7673 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7674 	DPRINTF(WM_DEBUG_TX,
   7675 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7676 	    txq->txq_next, 0, vl_len));
   7677 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7678 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7679 	txs->txs_ndesc++;
   7680 	return 0;
   7681 }
   7682 
   7683 /*
   7684  * wm_nq_start:		[ifnet interface function]
   7685  *
   7686  *	Start packet transmission on the interface for NEWQUEUE devices
   7687  */
   7688 static void
   7689 wm_nq_start(struct ifnet *ifp)
   7690 {
   7691 	struct wm_softc *sc = ifp->if_softc;
   7692 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7693 
   7694 #ifdef WM_MPSAFE
   7695 	KASSERT(if_is_mpsafe(ifp));
   7696 #endif
   7697 	/*
   7698 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7699 	 */
   7700 
   7701 	mutex_enter(txq->txq_lock);
   7702 	if (!txq->txq_stopping)
   7703 		wm_nq_start_locked(ifp);
   7704 	mutex_exit(txq->txq_lock);
   7705 }
   7706 
   7707 static void
   7708 wm_nq_start_locked(struct ifnet *ifp)
   7709 {
   7710 	struct wm_softc *sc = ifp->if_softc;
   7711 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7712 
   7713 	wm_nq_send_common_locked(ifp, txq, false);
   7714 }
   7715 
   7716 static int
   7717 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7718 {
   7719 	int qid;
   7720 	struct wm_softc *sc = ifp->if_softc;
   7721 	struct wm_txqueue *txq;
   7722 
   7723 	qid = wm_select_txqueue(ifp, m);
   7724 	txq = &sc->sc_queue[qid].wmq_txq;
   7725 
   7726 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7727 		m_freem(m);
   7728 		WM_Q_EVCNT_INCR(txq, txdrop);
   7729 		return ENOBUFS;
   7730 	}
   7731 
   7732 	/*
   7733 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7734 	 */
   7735 	ifp->if_obytes += m->m_pkthdr.len;
   7736 	if (m->m_flags & M_MCAST)
   7737 		ifp->if_omcasts++;
   7738 
   7739 	/*
    7740 	 * The situations in which this mutex_tryenter() can fail at run
    7741 	 * time are the following two patterns:
    7742 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7743 	 *     (2) contention with the deferred if_start softint
         	 *         (wm_handle_queue())
   7744 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   7745 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   7746 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   7747 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   7748 	 * stuck, either.
   7749 	 */
   7750 	if (mutex_tryenter(txq->txq_lock)) {
   7751 		if (!txq->txq_stopping)
   7752 			wm_nq_transmit_locked(ifp, txq);
   7753 		mutex_exit(txq->txq_lock);
   7754 	}
   7755 
   7756 	return 0;
   7757 }
   7758 
   7759 static void
   7760 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7761 {
   7762 
   7763 	wm_nq_send_common_locked(ifp, txq, true);
   7764 }
   7765 
   7766 static void
   7767 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7768     bool is_transmit)
   7769 {
   7770 	struct wm_softc *sc = ifp->if_softc;
   7771 	struct mbuf *m0;
   7772 	struct wm_txsoft *txs;
   7773 	bus_dmamap_t dmamap;
   7774 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7775 	bool do_csum, sent;
   7776 
   7777 	KASSERT(mutex_owned(txq->txq_lock));
   7778 
   7779 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7780 		return;
   7781 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7782 		return;
   7783 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7784 		return;
   7785 
   7786 	sent = false;
   7787 
   7788 	/*
   7789 	 * Loop through the send queue, setting up transmit descriptors
   7790 	 * until we drain the queue, or use up all available transmit
   7791 	 * descriptors.
   7792 	 */
   7793 	for (;;) {
   7794 		m0 = NULL;
   7795 
   7796 		/* Get a work queue entry. */
   7797 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7798 			wm_txeof(txq, UINT_MAX);
   7799 			if (txq->txq_sfree == 0) {
   7800 				DPRINTF(WM_DEBUG_TX,
   7801 				    ("%s: TX: no free job descriptors\n",
   7802 					device_xname(sc->sc_dev)));
   7803 				WM_Q_EVCNT_INCR(txq, txsstall);
   7804 				break;
   7805 			}
   7806 		}
   7807 
   7808 		/* Grab a packet off the queue. */
   7809 		if (is_transmit)
   7810 			m0 = pcq_get(txq->txq_interq);
   7811 		else
   7812 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7813 		if (m0 == NULL)
   7814 			break;
   7815 
   7816 		DPRINTF(WM_DEBUG_TX,
   7817 		    ("%s: TX: have packet to transmit: %p\n",
   7818 		    device_xname(sc->sc_dev), m0));
   7819 
   7820 		txs = &txq->txq_soft[txq->txq_snext];
   7821 		dmamap = txs->txs_dmamap;
   7822 
   7823 		/*
   7824 		 * Load the DMA map.  If this fails, the packet either
   7825 		 * didn't fit in the allotted number of segments, or we
   7826 		 * were short on resources.  For the too-many-segments
   7827 		 * case, we simply report an error and drop the packet,
   7828 		 * since we can't sanely copy a jumbo packet to a single
   7829 		 * buffer.
   7830 		 */
   7831 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7832 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7833 		if (error) {
   7834 			if (error == EFBIG) {
   7835 				WM_Q_EVCNT_INCR(txq, txdrop);
   7836 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7837 				    "DMA segments, dropping...\n",
   7838 				    device_xname(sc->sc_dev));
   7839 				wm_dump_mbuf_chain(sc, m0);
   7840 				m_freem(m0);
   7841 				continue;
   7842 			}
   7843 			/* Short on resources, just stop for now. */
   7844 			DPRINTF(WM_DEBUG_TX,
   7845 			    ("%s: TX: dmamap load failed: %d\n",
   7846 			    device_xname(sc->sc_dev), error));
   7847 			break;
   7848 		}
   7849 
   7850 		segs_needed = dmamap->dm_nsegs;
   7851 
   7852 		/*
   7853 		 * Ensure we have enough descriptors free to describe
   7854 		 * the packet.  Note, we always reserve one descriptor
   7855 		 * at the end of the ring due to the semantics of the
   7856 		 * TDT register, plus one more in the event we need
   7857 		 * to load offload context.
   7858 		 */
   7859 		if (segs_needed > txq->txq_free - 2) {
   7860 			/*
   7861 			 * Not enough free descriptors to transmit this
   7862 			 * packet.  We haven't committed anything yet,
   7863 			 * so just unload the DMA map, put the packet
    7864 			 * back on the queue, and punt.  Notify the upper
   7865 			 * layer that there are no more slots left.
   7866 			 */
   7867 			DPRINTF(WM_DEBUG_TX,
   7868 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7869 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7870 			    segs_needed, txq->txq_free - 1));
   7871 			if (!is_transmit)
   7872 				ifp->if_flags |= IFF_OACTIVE;
   7873 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7874 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7875 			WM_Q_EVCNT_INCR(txq, txdstall);
   7876 			break;
   7877 		}
   7878 
   7879 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7880 
   7881 		DPRINTF(WM_DEBUG_TX,
   7882 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7883 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7884 
   7885 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7886 
   7887 		/*
   7888 		 * Store a pointer to the packet so that we can free it
   7889 		 * later.
   7890 		 *
   7891 		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
   7893 		 * incremented by 1 if we do checksum offload (a descriptor
   7894 		 * is used to set the checksum context).
   7895 		 */
   7896 		txs->txs_mbuf = m0;
   7897 		txs->txs_firstdesc = txq->txq_next;
   7898 		txs->txs_ndesc = segs_needed;
   7899 
   7900 		/* Set up offload parameters for this packet. */
   7901 		uint32_t cmdlen, fields, dcmdlen;
   7902 		if (m0->m_pkthdr.csum_flags &
   7903 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7904 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7905 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7906 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7907 			    &do_csum) != 0) {
   7908 				/* Error message already displayed. */
   7909 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7910 				continue;
   7911 			}
   7912 		} else {
   7913 			do_csum = false;
   7914 			cmdlen = 0;
   7915 			fields = 0;
   7916 		}
   7917 
   7918 		/* Sync the DMA map. */
   7919 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7920 		    BUS_DMASYNC_PREWRITE);
   7921 
   7922 		/* Initialize the first transmit descriptor. */
   7923 		nexttx = txq->txq_next;
   7924 		if (!do_csum) {
			/* Set up a legacy descriptor. */
   7926 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7927 			    dmamap->dm_segs[0].ds_addr);
   7928 			txq->txq_descs[nexttx].wtx_cmdlen =
   7929 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7930 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7931 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7932 			if (vlan_has_tag(m0)) {
   7933 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7934 				    htole32(WTX_CMD_VLE);
   7935 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7936 				    htole16(vlan_get_tag(m0));
   7937 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7939 			}
   7940 			dcmdlen = 0;
   7941 		} else {
			/* Set up an advanced data descriptor. */
   7943 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7944 			    htole64(dmamap->dm_segs[0].ds_addr);
   7945 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7946 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7948 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7949 			    htole32(fields);
   7950 			DPRINTF(WM_DEBUG_TX,
   7951 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7952 			    device_xname(sc->sc_dev), nexttx,
   7953 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7954 			DPRINTF(WM_DEBUG_TX,
   7955 			    ("\t 0x%08x%08x\n", fields,
   7956 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7957 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7958 		}
   7959 
   7960 		lasttx = nexttx;
   7961 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the next descriptors.  The layout is the same
		 * here for the legacy and advanced formats.
		 */
   7966 		for (seg = 1; seg < dmamap->dm_nsegs;
   7967 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7968 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7969 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7970 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7971 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7972 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7973 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7974 			lasttx = nexttx;
   7975 
   7976 			DPRINTF(WM_DEBUG_TX,
   7977 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7978 			     "len %#04zx\n",
   7979 			    device_xname(sc->sc_dev), nexttx,
   7980 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7981 			    dmamap->dm_segs[seg].ds_len));
   7982 		}
   7983 
   7984 		KASSERT(lasttx != -1);
   7985 
   7986 		/*
   7987 		 * Set up the command byte on the last descriptor of
   7988 		 * the packet.  If we're in the interrupt delay window,
   7989 		 * delay the interrupt.
   7990 		 */
   7991 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7992 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
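		/*
		 * The legacy and advanced descriptor layouts share
		 * storage, and the KASSERT above verifies that the
		 * EOP/RS bits coincide in both formats, so setting them
		 * through the legacy wtx_cmdlen field below is safe for
		 * either descriptor format.
		 */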
   7993 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7994 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7995 
   7996 		txs->txs_lastdesc = lasttx;
   7997 
   7998 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7999 		    device_xname(sc->sc_dev),
   8000 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8001 
   8002 		/* Sync the descriptors we're using. */
   8003 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8004 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8005 
   8006 		/* Give the packet to the chip. */
   8007 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8008 		sent = true;
   8009 
   8010 		DPRINTF(WM_DEBUG_TX,
   8011 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8012 
   8013 		DPRINTF(WM_DEBUG_TX,
   8014 		    ("%s: TX: finished transmitting packet, job %d\n",
   8015 		    device_xname(sc->sc_dev), txq->txq_snext));
   8016 
   8017 		/* Advance the tx pointer. */
   8018 		txq->txq_free -= txs->txs_ndesc;
   8019 		txq->txq_next = nexttx;
   8020 
   8021 		txq->txq_sfree--;
   8022 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8023 
   8024 		/* Pass the packet to any BPF listeners. */
   8025 		bpf_mtap(ifp, m0);
   8026 	}
   8027 
   8028 	if (m0 != NULL) {
   8029 		if (!is_transmit)
   8030 			ifp->if_flags |= IFF_OACTIVE;
   8031 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8032 		WM_Q_EVCNT_INCR(txq, txdrop);
   8033 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8034 			__func__));
   8035 		m_freem(m0);
   8036 	}
   8037 
   8038 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8039 		/* No more slots; notify upper layer. */
   8040 		if (!is_transmit)
   8041 			ifp->if_flags |= IFF_OACTIVE;
   8042 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8043 	}
   8044 
   8045 	if (sent) {
   8046 		/* Set a watchdog timer in case the chip flakes out. */
   8047 		txq->txq_lastsent = time_uptime;
   8048 		txq->txq_sending = true;
   8049 	}
   8050 }
   8051 
   8052 static void
   8053 wm_deferred_start_locked(struct wm_txqueue *txq)
   8054 {
   8055 	struct wm_softc *sc = txq->txq_sc;
   8056 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8057 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8058 	int qid = wmq->wmq_id;
   8059 
   8060 	KASSERT(mutex_owned(txq->txq_lock));
   8061 
   8062 	if (txq->txq_stopping) {
   8063 		mutex_exit(txq->txq_lock);
   8064 		return;
   8065 	}
   8066 
   8067 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   8069 		if (qid == 0)
   8070 			wm_nq_start_locked(ifp);
   8071 		wm_nq_transmit_locked(ifp, txq);
   8072 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   8074 		if (qid == 0)
   8075 			wm_start_locked(ifp);
   8076 		wm_transmit_locked(ifp, txq);
   8077 	}
   8078 }
   8079 
   8080 /* Interrupt */
   8081 
   8082 /*
   8083  * wm_txeof:
   8084  *
   8085  *	Helper; handle transmit interrupts.
   8086  */
   8087 static bool
   8088 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8089 {
   8090 	struct wm_softc *sc = txq->txq_sc;
   8091 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8092 	struct wm_txsoft *txs;
   8093 	int count = 0;
   8094 	int i;
   8095 	uint8_t status;
   8096 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8097 	bool more = false;
   8098 
   8099 	KASSERT(mutex_owned(txq->txq_lock));
   8100 
   8101 	if (txq->txq_stopping)
   8102 		return false;
   8103 
   8104 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8106 	if (wmq->wmq_id == 0)
   8107 		ifp->if_flags &= ~IFF_OACTIVE;
   8108 
   8109 	/*
   8110 	 * Go through the Tx list and free mbufs for those
   8111 	 * frames which have been transmitted.
   8112 	 */
   8113 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8114 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8115 		if (limit-- == 0) {
   8116 			more = true;
   8117 			DPRINTF(WM_DEBUG_TX,
   8118 			    ("%s: TX: loop limited, job %d is not processed\n",
   8119 				device_xname(sc->sc_dev), i));
   8120 			break;
   8121 		}
   8122 
   8123 		txs = &txq->txq_soft[i];
   8124 
   8125 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8126 			device_xname(sc->sc_dev), i));
   8127 
   8128 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8129 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8130 
   8131 		status =
   8132 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8133 		if ((status & WTX_ST_DD) == 0) {
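			/*
			 * This job's last descriptor is not yet done, so
			 * the hardware still owns it.  Re-sync it so the
			 * next poll re-reads a fresh status, then stop
			 * scanning.
			 */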
   8134 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8135 			    BUS_DMASYNC_PREREAD);
   8136 			break;
   8137 		}
   8138 
   8139 		count++;
   8140 		DPRINTF(WM_DEBUG_TX,
   8141 		    ("%s: TX: job %d done: descs %d..%d\n",
   8142 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8143 		    txs->txs_lastdesc));
   8144 
   8145 		/*
   8146 		 * XXX We should probably be using the statistics
   8147 		 * XXX registers, but I don't know if they exist
   8148 		 * XXX on chips before the i82544.
   8149 		 */
   8150 
   8151 #ifdef WM_EVENT_COUNTERS
   8152 		if (status & WTX_ST_TU)
   8153 			WM_Q_EVCNT_INCR(txq, tu);
   8154 #endif /* WM_EVENT_COUNTERS */
   8155 
   8156 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8157 			ifp->if_oerrors++;
   8158 			if (status & WTX_ST_LC)
   8159 				log(LOG_WARNING, "%s: late collision\n",
   8160 				    device_xname(sc->sc_dev));
   8161 			else if (status & WTX_ST_EC) {
   8162 				ifp->if_collisions += 16;
   8163 				log(LOG_WARNING, "%s: excessive collisions\n",
   8164 				    device_xname(sc->sc_dev));
   8165 			}
   8166 		} else
   8167 			ifp->if_opackets++;
   8168 
   8169 		txq->txq_packets++;
   8170 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8171 
   8172 		txq->txq_free += txs->txs_ndesc;
   8173 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8174 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8175 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8176 		m_freem(txs->txs_mbuf);
   8177 		txs->txs_mbuf = NULL;
   8178 	}
   8179 
   8180 	/* Update the dirty transmit buffer pointer. */
   8181 	txq->txq_sdirty = i;
   8182 	DPRINTF(WM_DEBUG_TX,
   8183 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8184 
   8185 	if (count != 0)
   8186 		rnd_add_uint32(&sc->rnd_source, count);
   8187 
   8188 	/*
   8189 	 * If there are no more pending transmissions, cancel the watchdog
   8190 	 * timer.
   8191 	 */
   8192 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8193 		txq->txq_sending = false;
   8194 
   8195 	return more;
   8196 }
   8197 
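/*
 * Receive descriptor accessors.
 *
 * Three descriptor layouts are in use: the 82574 extended format, the
 * "new queue" (82575 and newer) format, and the legacy format.  Each
 * accessor below selects the field appropriate to the chip in use.
 */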
   8198 static inline uint32_t
   8199 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8200 {
   8201 	struct wm_softc *sc = rxq->rxq_sc;
   8202 
   8203 	if (sc->sc_type == WM_T_82574)
   8204 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8205 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8206 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8207 	else
   8208 		return rxq->rxq_descs[idx].wrx_status;
   8209 }
   8210 
   8211 static inline uint32_t
   8212 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8213 {
   8214 	struct wm_softc *sc = rxq->rxq_sc;
   8215 
   8216 	if (sc->sc_type == WM_T_82574)
   8217 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8218 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8219 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8220 	else
   8221 		return rxq->rxq_descs[idx].wrx_errors;
   8222 }
   8223 
   8224 static inline uint16_t
   8225 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8226 {
   8227 	struct wm_softc *sc = rxq->rxq_sc;
   8228 
   8229 	if (sc->sc_type == WM_T_82574)
   8230 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8231 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8232 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8233 	else
   8234 		return rxq->rxq_descs[idx].wrx_special;
   8235 }
   8236 
   8237 static inline int
   8238 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8239 {
   8240 	struct wm_softc *sc = rxq->rxq_sc;
   8241 
   8242 	if (sc->sc_type == WM_T_82574)
   8243 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8244 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8245 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8246 	else
   8247 		return rxq->rxq_descs[idx].wrx_len;
   8248 }
   8249 
   8250 #ifdef WM_DEBUG
   8251 static inline uint32_t
   8252 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8253 {
   8254 	struct wm_softc *sc = rxq->rxq_sc;
   8255 
   8256 	if (sc->sc_type == WM_T_82574)
   8257 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8258 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8259 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8260 	else
   8261 		return 0;
   8262 }
   8263 
   8264 static inline uint8_t
   8265 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8266 {
   8267 	struct wm_softc *sc = rxq->rxq_sc;
   8268 
   8269 	if (sc->sc_type == WM_T_82574)
   8270 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8271 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8272 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8273 	else
   8274 		return 0;
   8275 }
   8276 #endif /* WM_DEBUG */
   8277 
   8278 static inline bool
   8279 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8280     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8281 {
   8282 
   8283 	if (sc->sc_type == WM_T_82574)
   8284 		return (status & ext_bit) != 0;
   8285 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8286 		return (status & nq_bit) != 0;
   8287 	else
   8288 		return (status & legacy_bit) != 0;
   8289 }
   8290 
   8291 static inline bool
   8292 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8293     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8294 {
   8295 
   8296 	if (sc->sc_type == WM_T_82574)
   8297 		return (error & ext_bit) != 0;
   8298 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8299 		return (error & nq_bit) != 0;
   8300 	else
   8301 		return (error & legacy_bit) != 0;
   8302 }
   8303 
   8304 static inline bool
   8305 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8306 {
   8307 
   8308 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8309 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8310 		return true;
   8311 	else
   8312 		return false;
   8313 }
   8314 
   8315 static inline bool
   8316 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8317 {
   8318 	struct wm_softc *sc = rxq->rxq_sc;
   8319 
	/* XXX missing error bit for newqueue? */
   8321 	if (wm_rxdesc_is_set_error(sc, errors,
   8322 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8323 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8324 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8325 		NQRXC_ERROR_RXE)) {
   8326 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8327 		    EXTRXC_ERROR_SE, 0))
   8328 			log(LOG_WARNING, "%s: symbol error\n",
   8329 			    device_xname(sc->sc_dev));
   8330 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8331 		    EXTRXC_ERROR_SEQ, 0))
   8332 			log(LOG_WARNING, "%s: receive sequence error\n",
   8333 			    device_xname(sc->sc_dev));
   8334 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8335 		    EXTRXC_ERROR_CE, 0))
   8336 			log(LOG_WARNING, "%s: CRC error\n",
   8337 			    device_xname(sc->sc_dev));
   8338 		return true;
   8339 	}
   8340 
   8341 	return false;
   8342 }
   8343 
   8344 static inline bool
   8345 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8346 {
   8347 	struct wm_softc *sc = rxq->rxq_sc;
   8348 
   8349 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8350 		NQRXC_STATUS_DD)) {
   8351 		/* We have processed all of the receive descriptors. */
   8352 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8353 		return false;
   8354 	}
   8355 
   8356 	return true;
   8357 }
   8358 
   8359 static inline bool
   8360 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8361     uint16_t vlantag, struct mbuf *m)
   8362 {
   8363 
   8364 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8365 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8366 		vlan_set_tag(m, le16toh(vlantag));
   8367 	}
   8368 
   8369 	return true;
   8370 }
   8371 
   8372 static inline void
   8373 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8374     uint32_t errors, struct mbuf *m)
   8375 {
   8376 	struct wm_softc *sc = rxq->rxq_sc;
   8377 
   8378 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8379 		if (wm_rxdesc_is_set_status(sc, status,
   8380 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8381 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8382 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8383 			if (wm_rxdesc_is_set_error(sc, errors,
   8384 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8385 				m->m_pkthdr.csum_flags |=
   8386 					M_CSUM_IPv4_BAD;
   8387 		}
   8388 		if (wm_rxdesc_is_set_status(sc, status,
   8389 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8390 			/*
   8391 			 * Note: we don't know if this was TCP or UDP,
   8392 			 * so we just set both bits, and expect the
   8393 			 * upper layers to deal.
   8394 			 */
   8395 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8396 			m->m_pkthdr.csum_flags |=
   8397 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8398 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8399 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8400 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8401 				m->m_pkthdr.csum_flags |=
   8402 					M_CSUM_TCP_UDP_BAD;
   8403 		}
   8404 	}
   8405 }
   8406 
   8407 /*
   8408  * wm_rxeof:
   8409  *
   8410  *	Helper; handle receive interrupts.
   8411  */
   8412 static bool
   8413 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8414 {
   8415 	struct wm_softc *sc = rxq->rxq_sc;
   8416 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8417 	struct wm_rxsoft *rxs;
   8418 	struct mbuf *m;
   8419 	int i, len;
   8420 	int count = 0;
   8421 	uint32_t status, errors;
   8422 	uint16_t vlantag;
   8423 	bool more = false;
   8424 
   8425 	KASSERT(mutex_owned(rxq->rxq_lock));
   8426 
   8427 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8428 		if (limit-- == 0) {
   8429 			rxq->rxq_ptr = i;
   8430 			more = true;
   8431 			DPRINTF(WM_DEBUG_RX,
   8432 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8433 				device_xname(sc->sc_dev), i));
   8434 			break;
   8435 		}
   8436 
   8437 		rxs = &rxq->rxq_soft[i];
   8438 
   8439 		DPRINTF(WM_DEBUG_RX,
   8440 		    ("%s: RX: checking descriptor %d\n",
   8441 		    device_xname(sc->sc_dev), i));
   8442 		wm_cdrxsync(rxq, i,
   8443 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8444 
   8445 		status = wm_rxdesc_get_status(rxq, i);
   8446 		errors = wm_rxdesc_get_errors(rxq, i);
   8447 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8448 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8449 #ifdef WM_DEBUG
   8450 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8451 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8452 #endif
   8453 
   8454 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8455 			/*
			 * Update the receive pointer while holding
			 * rxq_lock, consistent with the counter updates.
   8458 			 */
   8459 			rxq->rxq_ptr = i;
   8460 			break;
   8461 		}
   8462 
   8463 		count++;
   8464 		if (__predict_false(rxq->rxq_discard)) {
   8465 			DPRINTF(WM_DEBUG_RX,
   8466 			    ("%s: RX: discarding contents of descriptor %d\n",
   8467 			    device_xname(sc->sc_dev), i));
   8468 			wm_init_rxdesc(rxq, i);
   8469 			if (wm_rxdesc_is_eop(rxq, status)) {
   8470 				/* Reset our state. */
   8471 				DPRINTF(WM_DEBUG_RX,
   8472 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8473 				    device_xname(sc->sc_dev)));
   8474 				rxq->rxq_discard = 0;
   8475 			}
   8476 			continue;
   8477 		}
   8478 
   8479 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8480 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8481 
   8482 		m = rxs->rxs_mbuf;
   8483 
   8484 		/*
   8485 		 * Add a new receive buffer to the ring, unless of
   8486 		 * course the length is zero. Treat the latter as a
   8487 		 * failed mapping.
   8488 		 */
   8489 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8490 			/*
   8491 			 * Failed, throw away what we've done so
   8492 			 * far, and discard the rest of the packet.
   8493 			 */
   8494 			ifp->if_ierrors++;
   8495 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8496 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8497 			wm_init_rxdesc(rxq, i);
   8498 			if (!wm_rxdesc_is_eop(rxq, status))
   8499 				rxq->rxq_discard = 1;
   8500 			if (rxq->rxq_head != NULL)
   8501 				m_freem(rxq->rxq_head);
   8502 			WM_RXCHAIN_RESET(rxq);
   8503 			DPRINTF(WM_DEBUG_RX,
   8504 			    ("%s: RX: Rx buffer allocation failed, "
   8505 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8506 			    rxq->rxq_discard ? " (discard)" : ""));
   8507 			continue;
   8508 		}
   8509 
   8510 		m->m_len = len;
   8511 		rxq->rxq_len += len;
   8512 		DPRINTF(WM_DEBUG_RX,
   8513 		    ("%s: RX: buffer at %p len %d\n",
   8514 		    device_xname(sc->sc_dev), m->m_data, len));
   8515 
   8516 		/* If this is not the end of the packet, keep looking. */
   8517 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8518 			WM_RXCHAIN_LINK(rxq, m);
   8519 			DPRINTF(WM_DEBUG_RX,
   8520 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8521 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8522 			continue;
   8523 		}
   8524 
   8525 		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on the I350 and
		 * I21[01] (not all chips can be configured to strip it),
		 * so we need to trim it.
		 * We may need to adjust the length of the previous mbuf
		 * in the chain if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so we don't trim
		 * the FCS there.
   8534 		 */
   8535 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8536 		    && (sc->sc_type != WM_T_I210)
   8537 		    && (sc->sc_type != WM_T_I211)) {
   8538 			if (m->m_len < ETHER_CRC_LEN) {
   8539 				rxq->rxq_tail->m_len
   8540 				    -= (ETHER_CRC_LEN - m->m_len);
   8541 				m->m_len = 0;
   8542 			} else
   8543 				m->m_len -= ETHER_CRC_LEN;
   8544 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8545 		} else
   8546 			len = rxq->rxq_len;
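		/*
		 * Worked example (illustrative): if the final mbuf holds
		 * only 2 bytes of a frame and ETHER_CRC_LEN is 4, the
		 * previous mbuf is shortened by the missing 2 bytes and
		 * the final mbuf's length becomes 0, so no FCS bytes are
		 * passed up.
		 */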
   8547 
   8548 		WM_RXCHAIN_LINK(rxq, m);
   8549 
   8550 		*rxq->rxq_tailp = NULL;
   8551 		m = rxq->rxq_head;
   8552 
   8553 		WM_RXCHAIN_RESET(rxq);
   8554 
   8555 		DPRINTF(WM_DEBUG_RX,
   8556 		    ("%s: RX: have entire packet, len -> %d\n",
   8557 		    device_xname(sc->sc_dev), len));
   8558 
   8559 		/* If an error occurred, update stats and drop the packet. */
   8560 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8561 			m_freem(m);
   8562 			continue;
   8563 		}
   8564 
   8565 		/* No errors.  Receive the packet. */
   8566 		m_set_rcvif(m, ifp);
   8567 		m->m_pkthdr.len = len;
   8568 		/*
   8569 		 * TODO
		 * should save rsshash and rsstype in this mbuf.
   8571 		 */
   8572 		DPRINTF(WM_DEBUG_RX,
   8573 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8574 			device_xname(sc->sc_dev), rsstype, rsshash));
   8575 
   8576 		/*
   8577 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8578 		 * for us.  Associate the tag with the packet.
   8579 		 */
   8580 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8581 			continue;
   8582 
   8583 		/* Set up checksum info for this packet. */
   8584 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8585 		/*
   8586 		 * Update the receive pointer holding rxq_lock consistent with
   8587 		 * increment counter.
   8588 		 */
   8589 		rxq->rxq_ptr = i;
   8590 		rxq->rxq_packets++;
   8591 		rxq->rxq_bytes += len;
   8592 		mutex_exit(rxq->rxq_lock);
   8593 
   8594 		/* Pass it on. */
   8595 		if_percpuq_enqueue(sc->sc_ipq, m);
   8596 
   8597 		mutex_enter(rxq->rxq_lock);
   8598 
   8599 		if (rxq->rxq_stopping)
   8600 			break;
   8601 	}
   8602 
   8603 	if (count != 0)
   8604 		rnd_add_uint32(&sc->rnd_source, count);
   8605 
   8606 	DPRINTF(WM_DEBUG_RX,
   8607 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8608 
   8609 	return more;
   8610 }
   8611 
   8612 /*
   8613  * wm_linkintr_gmii:
   8614  *
   8615  *	Helper; handle link interrupts for GMII.
   8616  */
   8617 static void
   8618 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8619 {
   8620 
   8621 	KASSERT(WM_CORE_LOCKED(sc));
   8622 
   8623 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8624 		__func__));
   8625 
   8626 	if (icr & ICR_LSC) {
   8627 		uint32_t reg;
   8628 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8629 
   8630 		if ((status & STATUS_LU) != 0) {
   8631 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8632 				device_xname(sc->sc_dev),
   8633 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8634 		} else {
   8635 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8636 				device_xname(sc->sc_dev)));
   8637 		}
   8638 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8639 			wm_gig_downshift_workaround_ich8lan(sc);
   8640 
   8641 		if ((sc->sc_type == WM_T_ICH8)
   8642 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8643 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8644 		}
   8645 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8646 			device_xname(sc->sc_dev)));
   8647 		mii_pollstat(&sc->sc_mii);
   8648 		if (sc->sc_type == WM_T_82543) {
   8649 			int miistatus, active;
   8650 
   8651 			/*
			 * With the 82543, we need to force the MAC's
			 * speed and duplex to match the PHY's speed
			 * and duplex configuration.
   8655 			 */
   8656 			miistatus = sc->sc_mii.mii_media_status;
   8657 
   8658 			if (miistatus & IFM_ACTIVE) {
   8659 				active = sc->sc_mii.mii_media_active;
   8660 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8661 				switch (IFM_SUBTYPE(active)) {
   8662 				case IFM_10_T:
   8663 					sc->sc_ctrl |= CTRL_SPEED_10;
   8664 					break;
   8665 				case IFM_100_TX:
   8666 					sc->sc_ctrl |= CTRL_SPEED_100;
   8667 					break;
   8668 				case IFM_1000_T:
   8669 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8670 					break;
   8671 				default:
   8672 					/*
					 * Fiber?
					 * Should not enter here.
   8675 					 */
   8676 					printf("unknown media (%x)\n", active);
   8677 					break;
   8678 				}
   8679 				if (active & IFM_FDX)
   8680 					sc->sc_ctrl |= CTRL_FD;
   8681 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8682 			}
   8683 		} else if (sc->sc_type == WM_T_PCH) {
   8684 			wm_k1_gig_workaround_hv(sc,
   8685 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8686 		}
   8687 
   8688 		if ((sc->sc_phytype == WMPHY_82578)
   8689 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8690 			== IFM_1000_T)) {
   8691 
   8692 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8693 				delay(200*1000); /* XXX too big */
   8694 
   8695 				/* Link stall fix for link up */
   8696 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8697 				    HV_MUX_DATA_CTRL,
   8698 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8699 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8700 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8701 				    HV_MUX_DATA_CTRL,
   8702 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8703 			}
   8704 		}
   8705 		/*
   8706 		 * I217 Packet Loss issue:
   8707 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8708 		 * on power up.
   8709 		 * Set the Beacon Duration for I217 to 8 usec
   8710 		 */
   8711 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8712 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8713 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8714 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8715 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8716 		}
   8717 
   8718 		/* XXX Work-around I218 hang issue */
   8719 		/* e1000_k1_workaround_lpt_lp() */
   8720 
   8721 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8722 			/*
   8723 			 * Set platform power management values for Latency
   8724 			 * Tolerance Reporting (LTR)
   8725 			 */
   8726 			wm_platform_pm_pch_lpt(sc,
   8727 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8728 				    != 0));
   8729 		}
   8730 
   8731 		/* FEXTNVM6 K1-off workaround */
   8732 		if (sc->sc_type == WM_T_PCH_SPT) {
   8733 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8734 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8735 			    & FEXTNVM6_K1_OFF_ENABLE)
   8736 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8737 			else
   8738 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8739 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8740 		}
   8741 	} else if (icr & ICR_RXSEQ) {
   8742 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8743 			device_xname(sc->sc_dev)));
   8744 	}
   8745 }
   8746 
   8747 /*
   8748  * wm_linkintr_tbi:
   8749  *
   8750  *	Helper; handle link interrupts for TBI mode.
   8751  */
   8752 static void
   8753 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8754 {
   8755 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8756 	uint32_t status;
   8757 
   8758 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8759 		__func__));
   8760 
   8761 	status = CSR_READ(sc, WMREG_STATUS);
   8762 	if (icr & ICR_LSC) {
   8763 		if (status & STATUS_LU) {
   8764 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8765 			    device_xname(sc->sc_dev),
   8766 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8767 			/*
   8768 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8769 			 * so we should update sc->sc_ctrl
   8770 			 */
   8771 
   8772 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8773 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8774 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8775 			if (status & STATUS_FD)
   8776 				sc->sc_tctl |=
   8777 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8778 			else
   8779 				sc->sc_tctl |=
   8780 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8781 			if (sc->sc_ctrl & CTRL_TFCE)
   8782 				sc->sc_fcrtl |= FCRTL_XONE;
   8783 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8784 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8785 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8786 				      sc->sc_fcrtl);
   8787 			sc->sc_tbi_linkup = 1;
   8788 			if_link_state_change(ifp, LINK_STATE_UP);
   8789 		} else {
   8790 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8791 			    device_xname(sc->sc_dev)));
   8792 			sc->sc_tbi_linkup = 0;
   8793 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8794 		}
   8795 		/* Update LED */
   8796 		wm_tbi_serdes_set_linkled(sc);
   8797 	} else if (icr & ICR_RXSEQ) {
   8798 		DPRINTF(WM_DEBUG_LINK,
   8799 		    ("%s: LINK: Receive sequence error\n",
   8800 		    device_xname(sc->sc_dev)));
   8801 	}
   8802 }
   8803 
   8804 /*
   8805  * wm_linkintr_serdes:
   8806  *
 *	Helper; handle link interrupts for SERDES mode.
   8808  */
   8809 static void
   8810 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8811 {
   8812 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8813 	struct mii_data *mii = &sc->sc_mii;
   8814 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8815 	uint32_t pcs_adv, pcs_lpab, reg;
   8816 
   8817 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8818 		__func__));
   8819 
   8820 	if (icr & ICR_LSC) {
   8821 		/* Check PCS */
   8822 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8823 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8824 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8825 				device_xname(sc->sc_dev)));
   8826 			mii->mii_media_status |= IFM_ACTIVE;
   8827 			sc->sc_tbi_linkup = 1;
   8828 			if_link_state_change(ifp, LINK_STATE_UP);
   8829 		} else {
   8830 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8831 				device_xname(sc->sc_dev)));
   8832 			mii->mii_media_status |= IFM_NONE;
   8833 			sc->sc_tbi_linkup = 0;
   8834 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8835 			wm_tbi_serdes_set_linkled(sc);
   8836 			return;
   8837 		}
   8838 		mii->mii_media_active |= IFM_1000_SX;
   8839 		if ((reg & PCS_LSTS_FDX) != 0)
   8840 			mii->mii_media_active |= IFM_FDX;
   8841 		else
   8842 			mii->mii_media_active |= IFM_HDX;
   8843 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8844 			/* Check flow */
   8845 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8846 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8847 				DPRINTF(WM_DEBUG_LINK,
   8848 				    ("XXX LINKOK but not ACOMP\n"));
   8849 				return;
   8850 			}
   8851 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8852 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8853 			DPRINTF(WM_DEBUG_LINK,
   8854 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8855 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8856 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8857 				mii->mii_media_active |= IFM_FLOW
   8858 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8859 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8860 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8861 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8862 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8863 				mii->mii_media_active |= IFM_FLOW
   8864 				    | IFM_ETH_TXPAUSE;
   8865 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8866 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8867 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8868 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8869 				mii->mii_media_active |= IFM_FLOW
   8870 				    | IFM_ETH_RXPAUSE;
   8871 		}
   8872 		/* Update LED */
   8873 		wm_tbi_serdes_set_linkled(sc);
   8874 	} else {
   8875 		DPRINTF(WM_DEBUG_LINK,
   8876 		    ("%s: LINK: Receive sequence error\n",
   8877 		    device_xname(sc->sc_dev)));
   8878 	}
   8879 }
   8880 
   8881 /*
   8882  * wm_linkintr:
   8883  *
   8884  *	Helper; handle link interrupts.
   8885  */
   8886 static void
   8887 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8888 {
   8889 
   8890 	KASSERT(WM_CORE_LOCKED(sc));
   8891 
   8892 	if (sc->sc_flags & WM_F_HAS_MII)
   8893 		wm_linkintr_gmii(sc, icr);
   8894 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8895 	    && (sc->sc_type >= WM_T_82575))
   8896 		wm_linkintr_serdes(sc, icr);
   8897 	else
   8898 		wm_linkintr_tbi(sc, icr);
   8899 }
   8900 
   8901 /*
   8902  * wm_intr_legacy:
   8903  *
   8904  *	Interrupt service routine for INTx and MSI.
   8905  */
   8906 static int
   8907 wm_intr_legacy(void *arg)
   8908 {
   8909 	struct wm_softc *sc = arg;
   8910 	struct wm_queue *wmq = &sc->sc_queue[0];
   8911 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8912 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8913 	uint32_t icr, rndval = 0;
   8914 	int handled = 0;
   8915 
   8916 	while (1 /* CONSTCOND */) {
   8917 		icr = CSR_READ(sc, WMREG_ICR);
   8918 		if ((icr & sc->sc_icr) == 0)
   8919 			break;
   8920 		if (handled == 0) {
   8921 			DPRINTF(WM_DEBUG_TX,
			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8923 		}
   8924 		if (rndval == 0)
   8925 			rndval = icr;
   8926 
   8927 		mutex_enter(rxq->rxq_lock);
   8928 
   8929 		if (rxq->rxq_stopping) {
   8930 			mutex_exit(rxq->rxq_lock);
   8931 			break;
   8932 		}
   8933 
   8934 		handled = 1;
   8935 
   8936 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8937 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8938 			DPRINTF(WM_DEBUG_RX,
   8939 			    ("%s: RX: got Rx intr 0x%08x\n",
   8940 			    device_xname(sc->sc_dev),
   8941 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8942 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8943 		}
   8944 #endif
   8945 		/*
   8946 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
   8948 		 * So, we can call wm_rxeof() in interrupt context.
   8949 		 */
   8950 		wm_rxeof(rxq, UINT_MAX);
   8951 
   8952 		mutex_exit(rxq->rxq_lock);
   8953 		mutex_enter(txq->txq_lock);
   8954 
   8955 		if (txq->txq_stopping) {
   8956 			mutex_exit(txq->txq_lock);
   8957 			break;
   8958 		}
   8959 
   8960 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8961 		if (icr & ICR_TXDW) {
   8962 			DPRINTF(WM_DEBUG_TX,
   8963 			    ("%s: TX: got TXDW interrupt\n",
   8964 			    device_xname(sc->sc_dev)));
   8965 			WM_Q_EVCNT_INCR(txq, txdw);
   8966 		}
   8967 #endif
   8968 		wm_txeof(txq, UINT_MAX);
   8969 
   8970 		mutex_exit(txq->txq_lock);
   8971 		WM_CORE_LOCK(sc);
   8972 
   8973 		if (sc->sc_core_stopping) {
   8974 			WM_CORE_UNLOCK(sc);
   8975 			break;
   8976 		}
   8977 
   8978 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8979 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8980 			wm_linkintr(sc, icr);
   8981 		}
   8982 
   8983 		WM_CORE_UNLOCK(sc);
   8984 
   8985 		if (icr & ICR_RXO) {
   8986 #if defined(WM_DEBUG)
   8987 			log(LOG_WARNING, "%s: Receive overrun\n",
   8988 			    device_xname(sc->sc_dev));
   8989 #endif /* defined(WM_DEBUG) */
   8990 		}
   8991 	}
   8992 
   8993 	rnd_add_uint32(&sc->rnd_source, rndval);
   8994 
   8995 	if (handled) {
   8996 		/* Try to get more packets going. */
   8997 		softint_schedule(wmq->wmq_si);
   8998 	}
   8999 
   9000 	return handled;
   9001 }
   9002 
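/*
 * Per-queue interrupt mask helpers: the 82574 masks queue interrupts
 * via IMC/IMS using ICR_TXQ/ICR_RXQ bits, the 82575 via EIMC/EIMS
 * queue bits, and newer chips via one EIMC/EIMS bit per MSI-X vector.
 */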
   9003 static inline void
   9004 wm_txrxintr_disable(struct wm_queue *wmq)
   9005 {
   9006 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9007 
   9008 	if (sc->sc_type == WM_T_82574)
   9009 		CSR_WRITE(sc, WMREG_IMC,
   9010 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9011 	else if (sc->sc_type == WM_T_82575)
   9012 		CSR_WRITE(sc, WMREG_EIMC,
   9013 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9014 	else
   9015 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9016 }
   9017 
   9018 static inline void
   9019 wm_txrxintr_enable(struct wm_queue *wmq)
   9020 {
   9021 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9022 
   9023 	wm_itrs_calculate(sc, wmq);
   9024 
   9025 	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
	 * here.  There is no need to care whether RXQ(0) or RXQ(1) enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
	 * wm_handle_queue(wmq) is running.
   9030 	 */
   9031 	if (sc->sc_type == WM_T_82574)
   9032 		CSR_WRITE(sc, WMREG_IMS,
   9033 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9034 	else if (sc->sc_type == WM_T_82575)
   9035 		CSR_WRITE(sc, WMREG_EIMS,
   9036 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9037 	else
   9038 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9039 }
   9040 
   9041 static int
   9042 wm_txrxintr_msix(void *arg)
   9043 {
   9044 	struct wm_queue *wmq = arg;
   9045 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9046 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9047 	struct wm_softc *sc = txq->txq_sc;
   9048 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9049 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9050 	bool txmore;
   9051 	bool rxmore;
   9052 
   9053 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9054 
   9055 	DPRINTF(WM_DEBUG_TX,
   9056 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9057 
   9058 	wm_txrxintr_disable(wmq);
   9059 
   9060 	mutex_enter(txq->txq_lock);
   9061 
   9062 	if (txq->txq_stopping) {
   9063 		mutex_exit(txq->txq_lock);
   9064 		return 0;
   9065 	}
   9066 
   9067 	WM_Q_EVCNT_INCR(txq, txdw);
   9068 	txmore = wm_txeof(txq, txlimit);
	/* wm_deferred_start() is done in wm_handle_queue(). */
   9070 	mutex_exit(txq->txq_lock);
   9071 
   9072 	DPRINTF(WM_DEBUG_RX,
   9073 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9074 	mutex_enter(rxq->rxq_lock);
   9075 
   9076 	if (rxq->rxq_stopping) {
   9077 		mutex_exit(rxq->rxq_lock);
   9078 		return 0;
   9079 	}
   9080 
   9081 	WM_Q_EVCNT_INCR(rxq, rxintr);
   9082 	rxmore = wm_rxeof(rxq, rxlimit);
   9083 	mutex_exit(rxq->rxq_lock);
   9084 
   9085 	wm_itrs_writereg(sc, wmq);
   9086 
   9087 	if (txmore || rxmore)
   9088 		softint_schedule(wmq->wmq_si);
   9089 	else
   9090 		wm_txrxintr_enable(wmq);
   9091 
   9092 	return 1;
   9093 }
   9094 
   9095 static void
   9096 wm_handle_queue(void *arg)
   9097 {
   9098 	struct wm_queue *wmq = arg;
   9099 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9100 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9101 	struct wm_softc *sc = txq->txq_sc;
   9102 	u_int txlimit = sc->sc_tx_process_limit;
   9103 	u_int rxlimit = sc->sc_rx_process_limit;
   9104 	bool txmore;
   9105 	bool rxmore;
   9106 
   9107 	mutex_enter(txq->txq_lock);
   9108 	if (txq->txq_stopping) {
   9109 		mutex_exit(txq->txq_lock);
   9110 		return;
   9111 	}
   9112 	txmore = wm_txeof(txq, txlimit);
   9113 	wm_deferred_start_locked(txq);
   9114 	mutex_exit(txq->txq_lock);
   9115 
   9116 	mutex_enter(rxq->rxq_lock);
   9117 	if (rxq->rxq_stopping) {
   9118 		mutex_exit(rxq->rxq_lock);
   9119 		return;
   9120 	}
   9121 	WM_Q_EVCNT_INCR(rxq, rxdefer);
   9122 	rxmore = wm_rxeof(rxq, rxlimit);
   9123 	mutex_exit(rxq->rxq_lock);
   9124 
   9125 	if (txmore || rxmore)
   9126 		softint_schedule(wmq->wmq_si);
   9127 	else
   9128 		wm_txrxintr_enable(wmq);
   9129 }
   9130 
   9131 /*
   9132  * wm_linkintr_msix:
   9133  *
   9134  *	Interrupt service routine for link status change for MSI-X.
   9135  */
   9136 static int
   9137 wm_linkintr_msix(void *arg)
   9138 {
   9139 	struct wm_softc *sc = arg;
   9140 	uint32_t reg;
   9141 	bool has_rxo;
   9142 
   9143 	DPRINTF(WM_DEBUG_LINK,
   9144 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9145 
   9146 	reg = CSR_READ(sc, WMREG_ICR);
   9147 	WM_CORE_LOCK(sc);
   9148 	if (sc->sc_core_stopping)
   9149 		goto out;
   9150 
   9151 	if ((reg & ICR_LSC) != 0) {
   9152 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9153 		wm_linkintr(sc, ICR_LSC);
   9154 	}
   9155 
   9156 	/*
   9157 	 * XXX 82574 MSI-X mode workaround
   9158 	 *
	 * 82574 MSI-X mode raises the receive overrun (RXO) interrupt on
	 * the ICR_OTHER MSI-X vector; furthermore it raises neither the
	 * ICR_RXQ(0) nor the ICR_RXQ(1) vector.  So, we generate ICR_RXQ(0)
	 * and ICR_RXQ(1) interrupts by writing WMREG_ICS to process
	 * received packets.
   9163 	 */
   9164 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9165 #if defined(WM_DEBUG)
   9166 		log(LOG_WARNING, "%s: Receive overrun\n",
   9167 		    device_xname(sc->sc_dev));
   9168 #endif /* defined(WM_DEBUG) */
   9169 
   9170 		has_rxo = true;
   9171 		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so we handle ICR_OTHER in polling mode,
		 * as we do for the Tx/Rx interrupts.  ICR_OTHER is
		 * re-enabled at the end of wm_txrxintr_msix(), which is
		 * kicked by both the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9177 		 */
   9178 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9179 
   9180 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9181 	}
   9182 
   9185 out:
   9186 	WM_CORE_UNLOCK(sc);
   9187 
   9188 	if (sc->sc_type == WM_T_82574) {
   9189 		if (!has_rxo)
   9190 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9191 		else
   9192 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9193 	} else if (sc->sc_type == WM_T_82575)
   9194 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9195 	else
   9196 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9197 
   9198 	return 1;
   9199 }
   9200 
   9201 /*
   9202  * Media related.
   9203  * GMII, SGMII, TBI (and SERDES)
   9204  */
   9205 
   9206 /* Common */
   9207 
   9208 /*
   9209  * wm_tbi_serdes_set_linkled:
   9210  *
   9211  *	Update the link LED on TBI and SERDES devices.
   9212  */
   9213 static void
   9214 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9215 {
   9216 
   9217 	if (sc->sc_tbi_linkup)
   9218 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9219 	else
   9220 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9221 
   9222 	/* 82540 or newer devices are active low */
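	/*
	 * The XOR below inverts the bit chosen above, so on 82540 and
	 * newer parts a link-up state drives SWDPIN(0) low.
	 */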
   9223 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9224 
   9225 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9226 }
   9227 
   9228 /* GMII related */
   9229 
   9230 /*
   9231  * wm_gmii_reset:
   9232  *
   9233  *	Reset the PHY.
   9234  */
   9235 static void
   9236 wm_gmii_reset(struct wm_softc *sc)
   9237 {
   9238 	uint32_t reg;
   9239 	int rv;
   9240 
   9241 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9242 		device_xname(sc->sc_dev), __func__));
   9243 
   9244 	rv = sc->phy.acquire(sc);
   9245 	if (rv != 0) {
   9246 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9247 		    __func__);
   9248 		return;
   9249 	}
   9250 
   9251 	switch (sc->sc_type) {
   9252 	case WM_T_82542_2_0:
   9253 	case WM_T_82542_2_1:
   9254 		/* null */
   9255 		break;
   9256 	case WM_T_82543:
   9257 		/*
		 * With the 82543, we need to force the MAC's speed and duplex
		 * to match the PHY's speed and duplex configuration.
   9260 		 * In addition, we need to perform a hardware reset on the PHY
   9261 		 * to take it out of reset.
   9262 		 */
   9263 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9264 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9265 
   9266 		/* The PHY reset pin is active-low. */
   9267 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9268 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9269 		    CTRL_EXT_SWDPIN(4));
   9270 		reg |= CTRL_EXT_SWDPIO(4);
   9271 
   9272 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9273 		CSR_WRITE_FLUSH(sc);
   9274 		delay(10*1000);
   9275 
   9276 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9277 		CSR_WRITE_FLUSH(sc);
   9278 		delay(150);
   9279 #if 0
   9280 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9281 #endif
   9282 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9283 		break;
   9284 	case WM_T_82544:	/* reset 10000us */
   9285 	case WM_T_82540:
   9286 	case WM_T_82545:
   9287 	case WM_T_82545_3:
   9288 	case WM_T_82546:
   9289 	case WM_T_82546_3:
   9290 	case WM_T_82541:
   9291 	case WM_T_82541_2:
   9292 	case WM_T_82547:
   9293 	case WM_T_82547_2:
   9294 	case WM_T_82571:	/* reset 100us */
   9295 	case WM_T_82572:
   9296 	case WM_T_82573:
   9297 	case WM_T_82574:
   9298 	case WM_T_82575:
   9299 	case WM_T_82576:
   9300 	case WM_T_82580:
   9301 	case WM_T_I350:
   9302 	case WM_T_I354:
   9303 	case WM_T_I210:
   9304 	case WM_T_I211:
   9305 	case WM_T_82583:
   9306 	case WM_T_80003:
   9307 		/* generic reset */
   9308 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9309 		CSR_WRITE_FLUSH(sc);
   9310 		delay(20000);
   9311 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9312 		CSR_WRITE_FLUSH(sc);
   9313 		delay(20000);
   9314 
   9315 		if ((sc->sc_type == WM_T_82541)
   9316 		    || (sc->sc_type == WM_T_82541_2)
   9317 		    || (sc->sc_type == WM_T_82547)
   9318 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   9320 			/* XXX add code to set LED after phy reset */
   9321 		}
   9322 		break;
   9323 	case WM_T_ICH8:
   9324 	case WM_T_ICH9:
   9325 	case WM_T_ICH10:
   9326 	case WM_T_PCH:
   9327 	case WM_T_PCH2:
   9328 	case WM_T_PCH_LPT:
   9329 	case WM_T_PCH_SPT:
   9330 	case WM_T_PCH_CNP:
   9331 		/* generic reset */
   9332 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9333 		CSR_WRITE_FLUSH(sc);
   9334 		delay(100);
   9335 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9336 		CSR_WRITE_FLUSH(sc);
   9337 		delay(150);
   9338 		break;
   9339 	default:
   9340 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9341 		    __func__);
   9342 		break;
   9343 	}
   9344 
   9345 	sc->phy.release(sc);
   9346 
   9347 	/* get_cfg_done */
   9348 	wm_get_cfg_done(sc);
   9349 
   9350 	/* extra setup */
   9351 	switch (sc->sc_type) {
   9352 	case WM_T_82542_2_0:
   9353 	case WM_T_82542_2_1:
   9354 	case WM_T_82543:
   9355 	case WM_T_82544:
   9356 	case WM_T_82540:
   9357 	case WM_T_82545:
   9358 	case WM_T_82545_3:
   9359 	case WM_T_82546:
   9360 	case WM_T_82546_3:
   9361 	case WM_T_82541_2:
   9362 	case WM_T_82547_2:
   9363 	case WM_T_82571:
   9364 	case WM_T_82572:
   9365 	case WM_T_82573:
   9366 	case WM_T_82574:
   9367 	case WM_T_82583:
   9368 	case WM_T_82575:
   9369 	case WM_T_82576:
   9370 	case WM_T_82580:
   9371 	case WM_T_I350:
   9372 	case WM_T_I354:
   9373 	case WM_T_I210:
   9374 	case WM_T_I211:
   9375 	case WM_T_80003:
   9376 		/* null */
   9377 		break;
   9378 	case WM_T_82541:
   9379 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   9381 		break;
   9382 	case WM_T_ICH8:
   9383 	case WM_T_ICH9:
   9384 	case WM_T_ICH10:
   9385 	case WM_T_PCH:
   9386 	case WM_T_PCH2:
   9387 	case WM_T_PCH_LPT:
   9388 	case WM_T_PCH_SPT:
   9389 	case WM_T_PCH_CNP:
   9390 		wm_phy_post_reset(sc);
   9391 		break;
   9392 	default:
   9393 		panic("%s: unknown type\n", __func__);
   9394 		break;
   9395 	}
   9396 }
   9397 
   9398 /*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write function must be
 * selected.  To select the correct read/write function, the PCI ID or
 * MAC type is required, without accessing PHY registers.
 *
 *  On the first call of this function, the PHY ID is not yet known.
 * Check the PCI ID or MAC type.  The list of PCI IDs may not be perfect,
 * so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type.  This might still not be perfect because of missing table
 * entries, but it is more reliable than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
   9415  */
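/*
 * Illustrative flow (assumed, not from the original comment): the first
 * call might select wm_gmii_hv_readreg for a PCH-class device from
 * sc_type alone; once the MII layer has read the PHY ID, the second
 * call can confirm or correct that choice from the PHY OUI and model.
 */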
   9416 static void
   9417 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9418     uint16_t phy_model)
   9419 {
   9420 	device_t dev = sc->sc_dev;
   9421 	struct mii_data *mii = &sc->sc_mii;
   9422 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9423 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9424 	mii_readreg_t new_readreg;
   9425 	mii_writereg_t new_writereg;
   9426 
   9427 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9428 		device_xname(sc->sc_dev), __func__));
   9429 
   9430 	if (mii->mii_readreg == NULL) {
   9431 		/*
   9432 		 *  This is the first call of this function. For ICH and PCH
   9433 		 * variants, it's difficult to determine the PHY access method
   9434 		 * by sc_type, so use the PCI product ID for some devices.
   9435 		 */
   9436 
   9437 		switch (sc->sc_pcidevid) {
   9438 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9439 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9440 			/* 82577 */
   9441 			new_phytype = WMPHY_82577;
   9442 			break;
   9443 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9444 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9445 			/* 82578 */
   9446 			new_phytype = WMPHY_82578;
   9447 			break;
   9448 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9449 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9450 			/* 82579 */
   9451 			new_phytype = WMPHY_82579;
   9452 			break;
   9453 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9454 		case PCI_PRODUCT_INTEL_82801I_BM:
   9455 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9456 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9457 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9458 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9459 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9460 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9461 			/* ICH8, 9, 10 with 82567 */
   9462 			new_phytype = WMPHY_BM;
   9463 			break;
   9464 		default:
   9465 			break;
   9466 		}
   9467 	} else {
   9468 		/* It's not the first call. Use PHY OUI and model */
   9469 		switch (phy_oui) {
   9470 		case MII_OUI_ATHEROS: /* XXX ??? */
   9471 			switch (phy_model) {
   9472 			case 0x0004: /* XXX */
   9473 				new_phytype = WMPHY_82578;
   9474 				break;
   9475 			default:
   9476 				break;
   9477 			}
   9478 			break;
   9479 		case MII_OUI_xxMARVELL:
   9480 			switch (phy_model) {
   9481 			case MII_MODEL_xxMARVELL_I210:
   9482 				new_phytype = WMPHY_I210;
   9483 				break;
   9484 			case MII_MODEL_xxMARVELL_E1011:
   9485 			case MII_MODEL_xxMARVELL_E1000_3:
   9486 			case MII_MODEL_xxMARVELL_E1000_5:
   9487 			case MII_MODEL_xxMARVELL_E1112:
   9488 				new_phytype = WMPHY_M88;
   9489 				break;
   9490 			case MII_MODEL_xxMARVELL_E1149:
   9491 				new_phytype = WMPHY_BM;
   9492 				break;
   9493 			case MII_MODEL_xxMARVELL_E1111:
   9494 			case MII_MODEL_xxMARVELL_I347:
   9495 			case MII_MODEL_xxMARVELL_E1512:
   9496 			case MII_MODEL_xxMARVELL_E1340M:
   9497 			case MII_MODEL_xxMARVELL_E1543:
   9498 				new_phytype = WMPHY_M88;
   9499 				break;
   9500 			case MII_MODEL_xxMARVELL_I82563:
   9501 				new_phytype = WMPHY_GG82563;
   9502 				break;
   9503 			default:
   9504 				break;
   9505 			}
   9506 			break;
   9507 		case MII_OUI_INTEL:
   9508 			switch (phy_model) {
   9509 			case MII_MODEL_INTEL_I82577:
   9510 				new_phytype = WMPHY_82577;
   9511 				break;
   9512 			case MII_MODEL_INTEL_I82579:
   9513 				new_phytype = WMPHY_82579;
   9514 				break;
   9515 			case MII_MODEL_INTEL_I217:
   9516 				new_phytype = WMPHY_I217;
   9517 				break;
   9518 			case MII_MODEL_INTEL_I82580:
   9519 			case MII_MODEL_INTEL_I350:
   9520 				new_phytype = WMPHY_82580;
   9521 				break;
   9522 			default:
   9523 				break;
   9524 			}
   9525 			break;
   9526 		case MII_OUI_yyINTEL:
   9527 			switch (phy_model) {
   9528 			case MII_MODEL_yyINTEL_I82562G:
   9529 			case MII_MODEL_yyINTEL_I82562EM:
   9530 			case MII_MODEL_yyINTEL_I82562ET:
   9531 				new_phytype = WMPHY_IFE;
   9532 				break;
   9533 			case MII_MODEL_yyINTEL_IGP01E1000:
   9534 				new_phytype = WMPHY_IGP;
   9535 				break;
   9536 			case MII_MODEL_yyINTEL_I82566:
   9537 				new_phytype = WMPHY_IGP_3;
   9538 				break;
   9539 			default:
   9540 				break;
   9541 			}
   9542 			break;
   9543 		default:
   9544 			break;
   9545 		}
   9546 		if (new_phytype == WMPHY_UNKNOWN)
   9547 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9548 			    __func__);
   9549 
   9550 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
			    " was incorrect. PHY type from PHY ID = %u\n",
   9554 			    sc->sc_phytype, new_phytype);
   9555 		}
   9556 	}
   9557 
   9558 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9559 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9560 		/* SGMII */
   9561 		new_readreg = wm_sgmii_readreg;
   9562 		new_writereg = wm_sgmii_writereg;
   9563 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9564 		/* BM2 (phyaddr == 1) */
   9565 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9566 		    && (new_phytype != WMPHY_BM)
   9567 		    && (new_phytype != WMPHY_UNKNOWN))
   9568 			doubt_phytype = new_phytype;
   9569 		new_phytype = WMPHY_BM;
   9570 		new_readreg = wm_gmii_bm_readreg;
   9571 		new_writereg = wm_gmii_bm_writereg;
   9572 	} else if (sc->sc_type >= WM_T_PCH) {
   9573 		/* All PCH* use _hv_ */
   9574 		new_readreg = wm_gmii_hv_readreg;
   9575 		new_writereg = wm_gmii_hv_writereg;
   9576 	} else if (sc->sc_type >= WM_T_ICH8) {
   9577 		/* non-82567 ICH8, 9 and 10 */
   9578 		new_readreg = wm_gmii_i82544_readreg;
   9579 		new_writereg = wm_gmii_i82544_writereg;
   9580 	} else if (sc->sc_type >= WM_T_80003) {
   9581 		/* 80003 */
   9582 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9583 		    && (new_phytype != WMPHY_GG82563)
   9584 		    && (new_phytype != WMPHY_UNKNOWN))
   9585 			doubt_phytype = new_phytype;
   9586 		new_phytype = WMPHY_GG82563;
   9587 		new_readreg = wm_gmii_i80003_readreg;
   9588 		new_writereg = wm_gmii_i80003_writereg;
   9589 	} else if (sc->sc_type >= WM_T_I210) {
   9590 		/* I210 and I211 */
   9591 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9592 		    && (new_phytype != WMPHY_I210)
   9593 		    && (new_phytype != WMPHY_UNKNOWN))
   9594 			doubt_phytype = new_phytype;
   9595 		new_phytype = WMPHY_I210;
   9596 		new_readreg = wm_gmii_gs40g_readreg;
   9597 		new_writereg = wm_gmii_gs40g_writereg;
   9598 	} else if (sc->sc_type >= WM_T_82580) {
   9599 		/* 82580, I350 and I354 */
   9600 		new_readreg = wm_gmii_82580_readreg;
   9601 		new_writereg = wm_gmii_82580_writereg;
   9602 	} else if (sc->sc_type >= WM_T_82544) {
    9603 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9604 		new_readreg = wm_gmii_i82544_readreg;
   9605 		new_writereg = wm_gmii_i82544_writereg;
   9606 	} else {
   9607 		new_readreg = wm_gmii_i82543_readreg;
   9608 		new_writereg = wm_gmii_i82543_writereg;
   9609 	}
   9610 
   9611 	if (new_phytype == WMPHY_BM) {
   9612 		/* All BM use _bm_ */
   9613 		new_readreg = wm_gmii_bm_readreg;
   9614 		new_writereg = wm_gmii_bm_writereg;
   9615 	}
   9616 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9617 		/* All PCH* use _hv_ */
   9618 		new_readreg = wm_gmii_hv_readreg;
   9619 		new_writereg = wm_gmii_hv_writereg;
   9620 	}
   9621 
   9622 	/* Diag output */
   9623 	if (doubt_phytype != WMPHY_UNKNOWN)
   9624 		aprint_error_dev(dev, "Assumed new PHY type was "
   9625 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9626 		    new_phytype);
   9627 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9628 	    && (sc->sc_phytype != new_phytype))
    9629 		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9630 		    "was incorrect. New PHY type = %u\n",
   9631 		    sc->sc_phytype, new_phytype);
   9632 
   9633 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9634 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9635 
   9636 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9637 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9638 		    "function was incorrect.\n");
   9639 
   9640 	/* Update now */
   9641 	sc->sc_phytype = new_phytype;
   9642 	mii->mii_readreg = new_readreg;
   9643 	mii->mii_writereg = new_writereg;
   9644 }
   9645 
   9646 /*
   9647  * wm_get_phy_id_82575:
   9648  *
   9649  * Return PHY ID. Return -1 if it failed.
   9650  */
   9651 static int
   9652 wm_get_phy_id_82575(struct wm_softc *sc)
   9653 {
   9654 	uint32_t reg;
   9655 	int phyid = -1;
   9656 
   9657 	/* XXX */
   9658 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9659 		return -1;
   9660 
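	/*
	 * The active PHY address is kept in the PHY address field of
	 * MDIC (82575/82576) or MDICNFG (82580 and later), so read it
	 * back instead of probing every address.
	 */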
   9661 	if (wm_sgmii_uses_mdio(sc)) {
   9662 		switch (sc->sc_type) {
   9663 		case WM_T_82575:
   9664 		case WM_T_82576:
   9665 			reg = CSR_READ(sc, WMREG_MDIC);
   9666 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9667 			break;
   9668 		case WM_T_82580:
   9669 		case WM_T_I350:
   9670 		case WM_T_I354:
   9671 		case WM_T_I210:
   9672 		case WM_T_I211:
   9673 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9674 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9675 			break;
   9676 		default:
   9677 			return -1;
   9678 		}
   9679 	}
   9680 
   9681 	return phyid;
   9682 }
   9683 
   9684 
   9685 /*
   9686  * wm_gmii_mediainit:
   9687  *
   9688  *	Initialize media for use on 1000BASE-T devices.
   9689  */
   9690 static void
   9691 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9692 {
   9693 	device_t dev = sc->sc_dev;
   9694 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9695 	struct mii_data *mii = &sc->sc_mii;
   9696 	uint32_t reg;
   9697 
   9698 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9699 		device_xname(sc->sc_dev), __func__));
   9700 
   9701 	/* We have GMII. */
   9702 	sc->sc_flags |= WM_F_HAS_MII;
   9703 
   9704 	if (sc->sc_type == WM_T_80003)
   9705 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9706 	else
   9707 		sc->sc_tipg = TIPG_1000T_DFLT;
   9708 
   9709 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9710 	if ((sc->sc_type == WM_T_82580)
   9711 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9712 	    || (sc->sc_type == WM_T_I211)) {
   9713 		reg = CSR_READ(sc, WMREG_PHPM);
   9714 		reg &= ~PHPM_GO_LINK_D;
   9715 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9716 	}
   9717 
   9718 	/*
   9719 	 * Let the chip set speed/duplex on its own based on
   9720 	 * signals from the PHY.
   9721 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9722 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9723 	 */
   9724 	sc->sc_ctrl |= CTRL_SLU;
   9725 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9726 
   9727 	/* Initialize our media structures and probe the GMII. */
   9728 	mii->mii_ifp = ifp;
   9729 
   9730 	mii->mii_statchg = wm_gmii_statchg;
   9731 
    9732 	/* Switch PHY control from SMBus to PCIe */
   9733 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9734 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9735 	    || (sc->sc_type == WM_T_PCH_CNP))
   9736 		wm_smbustopci(sc);
   9737 
   9738 	wm_gmii_reset(sc);
   9739 
   9740 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9741 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9742 	    wm_gmii_mediastatus);
   9743 
   9744 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9745 	    || (sc->sc_type == WM_T_82580)
   9746 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9747 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9748 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9749 			/* Attach only one port */
   9750 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9751 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9752 		} else {
   9753 			int i, id;
   9754 			uint32_t ctrl_ext;
   9755 
   9756 			id = wm_get_phy_id_82575(sc);
   9757 			if (id != -1) {
   9758 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9759 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9760 			}
   9761 			if ((id == -1)
   9762 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9763 				/* Power on the SGMII PHY if it is disabled */
   9764 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9765 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9766 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9767 				CSR_WRITE_FLUSH(sc);
   9768 				delay(300*1000); /* XXX too long */
   9769 
    9770 				/* Try PHY addresses 1 through 7 */
   9771 				for (i = 1; i < 8; i++)
   9772 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9773 					    0xffffffff, i, MII_OFFSET_ANY,
   9774 					    MIIF_DOPAUSE);
   9775 
    9776 				/* Restore the previous SFP cage power state */
   9777 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9778 			}
   9779 		}
   9780 	} else {
   9781 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9782 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9783 	}
   9784 
   9785 	/*
   9786 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9787 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9788 	 */
   9789 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9790 		|| (sc->sc_type == WM_T_PCH_SPT)
   9791 		|| (sc->sc_type == WM_T_PCH_CNP))
   9792 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9793 		wm_set_mdio_slow_mode_hv(sc);
   9794 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9795 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9796 	}
   9797 
   9798 	/*
   9799 	 * (For ICH8 variants)
   9800 	 * If PHY detection failed, use BM's r/w function and retry.
   9801 	 */
   9802 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9803 		/* if failed, retry with *_bm_* */
   9804 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9805 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9806 		    sc->sc_phytype);
   9807 		sc->sc_phytype = WMPHY_BM;
   9808 		mii->mii_readreg = wm_gmii_bm_readreg;
   9809 		mii->mii_writereg = wm_gmii_bm_writereg;
   9810 
   9811 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9812 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9813 	}
   9814 
   9815 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9816 		/* No PHY was found */
   9817 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9818 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9819 		sc->sc_phytype = WMPHY_NONE;
   9820 	} else {
   9821 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9822 
   9823 		/*
    9824 		 * PHY found! Check the PHY type again with the second call
    9825 		 * of wm_gmii_setup_phytype.
   9826 		 */
   9827 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9828 		    child->mii_mpd_model);
   9829 
   9830 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9831 	}
   9832 }
   9833 
   9834 /*
   9835  * wm_gmii_mediachange:	[ifmedia interface function]
   9836  *
   9837  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9838  */
   9839 static int
   9840 wm_gmii_mediachange(struct ifnet *ifp)
   9841 {
   9842 	struct wm_softc *sc = ifp->if_softc;
   9843 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9844 	int rc;
   9845 
   9846 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9847 		device_xname(sc->sc_dev), __func__));
   9848 	if ((ifp->if_flags & IFF_UP) == 0)
   9849 		return 0;
   9850 
   9851 	/* Disable D0 LPLU. */
   9852 	wm_lplu_d0_disable(sc);
   9853 
   9854 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9855 	sc->sc_ctrl |= CTRL_SLU;
   9856 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9857 	    || (sc->sc_type > WM_T_82543)) {
   9858 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9859 	} else {
   9860 		sc->sc_ctrl &= ~CTRL_ASDE;
   9861 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9862 		if (ife->ifm_media & IFM_FDX)
   9863 			sc->sc_ctrl |= CTRL_FD;
   9864 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9865 		case IFM_10_T:
   9866 			sc->sc_ctrl |= CTRL_SPEED_10;
   9867 			break;
   9868 		case IFM_100_TX:
   9869 			sc->sc_ctrl |= CTRL_SPEED_100;
   9870 			break;
   9871 		case IFM_1000_T:
   9872 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9873 			break;
   9874 		default:
   9875 			panic("wm_gmii_mediachange: bad media 0x%x",
   9876 			    ife->ifm_media);
   9877 		}
   9878 	}
   9879 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9880 	CSR_WRITE_FLUSH(sc);
   9881 	if (sc->sc_type <= WM_T_82543)
   9882 		wm_gmii_reset(sc);
   9883 
   9884 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9885 		return 0;
   9886 	return rc;
   9887 }
   9888 
   9889 /*
   9890  * wm_gmii_mediastatus:	[ifmedia interface function]
   9891  *
   9892  *	Get the current interface media status on a 1000BASE-T device.
   9893  */
   9894 static void
   9895 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9896 {
   9897 	struct wm_softc *sc = ifp->if_softc;
   9898 
   9899 	ether_mediastatus(ifp, ifmr);
   9900 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9901 	    | sc->sc_flowflags;
   9902 }
   9903 
   9904 #define	MDI_IO		CTRL_SWDPIN(2)
   9905 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9906 #define	MDI_CLK		CTRL_SWDPIN(3)
   9907 
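/*
 * The i82543 bit-bangs the MII management frames through three
 * software-definable pins: MDI_IO carries the data, MDI_CLK is the
 * management clock, and MDI_DIR turns the data pin around between
 * host and PHY.  Each clock phase below is held for 10us.
 */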
   9908 static void
   9909 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9910 {
   9911 	uint32_t i, v;
   9912 
   9913 	v = CSR_READ(sc, WMREG_CTRL);
   9914 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9915 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9916 
   9917 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9918 		if (data & i)
   9919 			v |= MDI_IO;
   9920 		else
   9921 			v &= ~MDI_IO;
   9922 		CSR_WRITE(sc, WMREG_CTRL, v);
   9923 		CSR_WRITE_FLUSH(sc);
   9924 		delay(10);
   9925 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9926 		CSR_WRITE_FLUSH(sc);
   9927 		delay(10);
   9928 		CSR_WRITE(sc, WMREG_CTRL, v);
   9929 		CSR_WRITE_FLUSH(sc);
   9930 		delay(10);
   9931 	}
   9932 }
   9933 
   9934 static uint32_t
   9935 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9936 {
   9937 	uint32_t v, i, data = 0;
   9938 
   9939 	v = CSR_READ(sc, WMREG_CTRL);
   9940 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9941 	v |= CTRL_SWDPIO(3);
   9942 
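	/*
	 * MDI_DIR is left clear so the data pin is an input; clock one
	 * turnaround bit before shifting in the 16 data bits.
	 */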
   9943 	CSR_WRITE(sc, WMREG_CTRL, v);
   9944 	CSR_WRITE_FLUSH(sc);
   9945 	delay(10);
   9946 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9947 	CSR_WRITE_FLUSH(sc);
   9948 	delay(10);
   9949 	CSR_WRITE(sc, WMREG_CTRL, v);
   9950 	CSR_WRITE_FLUSH(sc);
   9951 	delay(10);
   9952 
   9953 	for (i = 0; i < 16; i++) {
   9954 		data <<= 1;
   9955 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9956 		CSR_WRITE_FLUSH(sc);
   9957 		delay(10);
   9958 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9959 			data |= 1;
   9960 		CSR_WRITE(sc, WMREG_CTRL, v);
   9961 		CSR_WRITE_FLUSH(sc);
   9962 		delay(10);
   9963 	}
   9964 
   9965 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9966 	CSR_WRITE_FLUSH(sc);
   9967 	delay(10);
   9968 	CSR_WRITE(sc, WMREG_CTRL, v);
   9969 	CSR_WRITE_FLUSH(sc);
   9970 	delay(10);
   9971 
   9972 	return data;
   9973 }
   9974 
   9975 #undef MDI_IO
   9976 #undef MDI_DIR
   9977 #undef MDI_CLK
   9978 
   9979 /*
   9980  * wm_gmii_i82543_readreg:	[mii interface function]
   9981  *
   9982  *	Read a PHY register on the GMII (i82543 version).
   9983  */
   9984 static int
   9985 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9986 {
   9987 	struct wm_softc *sc = device_private(dev);
   9988 	int rv;
   9989 
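	/*
	 * Shift out the 32-bit all-ones preamble, then the read frame:
	 * start and opcode bits followed by the PHY and register
	 * addresses, MSB first.
	 */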
   9990 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9991 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9992 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9993 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9994 
   9995 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9996 	    device_xname(dev), phy, reg, rv));
   9997 
   9998 	return rv;
   9999 }
   10000 
   10001 /*
   10002  * wm_gmii_i82543_writereg:	[mii interface function]
   10003  *
   10004  *	Write a PHY register on the GMII (i82543 version).
   10005  */
   10006 static void
   10007 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10008 {
   10009 	struct wm_softc *sc = device_private(dev);
   10010 
   10011 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10012 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10013 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10014 	    (MII_COMMAND_START << 30), 32);
   10015 }
   10016 
   10017 /*
   10018  * wm_gmii_mdic_readreg:	[mii interface function]
   10019  *
   10020  *	Read a PHY register on the GMII.
   10021  */
   10022 static int
   10023 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10024 {
   10025 	struct wm_softc *sc = device_private(dev);
   10026 	uint32_t mdic = 0;
   10027 	int i, rv;
   10028 
   10029 	if (reg > MII_ADDRMASK) {
   10030 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10031 		    __func__, sc->sc_phytype, reg);
   10032 		reg &= MII_ADDRMASK;
   10033 	}
   10034 
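	/* Start the read; the MAC sets MDIC_READY when the cycle completes. */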
   10035 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10036 	    MDIC_REGADD(reg));
   10037 
   10038 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10039 		mdic = CSR_READ(sc, WMREG_MDIC);
   10040 		if (mdic & MDIC_READY)
   10041 			break;
   10042 		delay(50);
   10043 	}
   10044 
   10045 	if ((mdic & MDIC_READY) == 0) {
   10046 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10047 		    device_xname(dev), phy, reg);
   10048 		rv = 0;
   10049 	} else if (mdic & MDIC_E) {
   10050 #if 0 /* This is normal if no PHY is present. */
   10051 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10052 		    device_xname(dev), phy, reg);
   10053 #endif
   10054 		rv = 0;
   10055 	} else {
   10056 		rv = MDIC_DATA(mdic);
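		/*
		 * All-ones data usually means no device answered; treat
		 * it as a failed read.
		 */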
   10057 		if (rv == 0xffff)
   10058 			rv = 0;
   10059 	}
   10060 
   10061 	return rv;
   10062 }
   10063 
   10064 /*
   10065  * wm_gmii_mdic_writereg:	[mii interface function]
   10066  *
   10067  *	Write a PHY register on the GMII.
   10068  */
   10069 static void
   10070 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10071 {
   10072 	struct wm_softc *sc = device_private(dev);
   10073 	uint32_t mdic = 0;
   10074 	int i;
   10075 
   10076 	if (reg > MII_ADDRMASK) {
   10077 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10078 		    __func__, sc->sc_phytype, reg);
   10079 		reg &= MII_ADDRMASK;
   10080 	}
   10081 
   10082 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10083 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10084 
   10085 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10086 		mdic = CSR_READ(sc, WMREG_MDIC);
   10087 		if (mdic & MDIC_READY)
   10088 			break;
   10089 		delay(50);
   10090 	}
   10091 
   10092 	if ((mdic & MDIC_READY) == 0)
   10093 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10094 		    device_xname(dev), phy, reg);
   10095 	else if (mdic & MDIC_E)
   10096 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10097 		    device_xname(dev), phy, reg);
   10098 }
   10099 
   10100 /*
   10101  * wm_gmii_i82544_readreg:	[mii interface function]
   10102  *
   10103  *	Read a PHY register on the GMII.
   10104  */
   10105 static int
   10106 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10107 {
   10108 	struct wm_softc *sc = device_private(dev);
   10109 	int rv;
   10110 
   10111 	if (sc->phy.acquire(sc)) {
   10112 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10113 		return 0;
   10114 	}
   10115 
   10116 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10117 		switch (sc->sc_phytype) {
   10118 		case WMPHY_IGP:
   10119 		case WMPHY_IGP_2:
   10120 		case WMPHY_IGP_3:
   10121 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10122 			    reg);
   10123 			break;
   10124 		default:
   10125 #ifdef WM_DEBUG
   10126 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10127 			    __func__, sc->sc_phytype, reg);
   10128 #endif
   10129 			break;
   10130 		}
   10131 	}
   10132 
   10133 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10134 	sc->phy.release(sc);
   10135 
   10136 	return rv;
   10137 }
   10138 
   10139 /*
   10140  * wm_gmii_i82544_writereg:	[mii interface function]
   10141  *
   10142  *	Write a PHY register on the GMII.
   10143  */
   10144 static void
   10145 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10146 {
   10147 	struct wm_softc *sc = device_private(dev);
   10148 
   10149 	if (sc->phy.acquire(sc)) {
   10150 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10151 		return;
   10152 	}
   10153 
   10154 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10155 		switch (sc->sc_phytype) {
   10156 		case WMPHY_IGP:
   10157 		case WMPHY_IGP_2:
   10158 		case WMPHY_IGP_3:
   10159 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10160 			    reg);
   10161 			break;
   10162 		default:
   10163 #ifdef WM_DEBUG
    10164 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10165 			    __func__, sc->sc_phytype, reg);
   10166 #endif
   10167 			break;
   10168 		}
   10169 	}
   10170 
   10171 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10172 	sc->phy.release(sc);
   10173 }
   10174 
   10175 /*
   10176  * wm_gmii_i80003_readreg:	[mii interface function]
   10177  *
    10178  *	Read a PHY register on the kumeran bus (80003).
    10179  * This could be handled by the PHY layer if we didn't have to lock the
    10180  * resource ...
   10181  */
   10182 static int
   10183 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10184 {
   10185 	struct wm_softc *sc = device_private(dev);
   10186 	int page_select, temp;
   10187 	int rv;
   10188 
   10189 	if (phy != 1) /* only one PHY on kumeran bus */
   10190 		return 0;
   10191 
   10192 	if (sc->phy.acquire(sc)) {
   10193 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10194 		return 0;
   10195 	}
   10196 
   10197 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10198 		page_select = GG82563_PHY_PAGE_SELECT;
   10199 	else {
   10200 		/*
   10201 		 * Use Alternative Page Select register to access registers
   10202 		 * 30 and 31.
   10203 		 */
   10204 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10205 	}
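	/* The page number is carried in the upper bits of 'reg'. */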
   10206 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10207 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10208 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10209 		/*
    10210 		 * Wait 200us more to work around a bug in the ready bit
    10211 		 * of the MDIC register.
   10212 		 */
   10213 		delay(200);
   10214 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10215 			device_printf(dev, "%s failed\n", __func__);
   10216 			rv = 0; /* XXX */
   10217 			goto out;
   10218 		}
   10219 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10220 		delay(200);
   10221 	} else
   10222 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10223 
   10224 out:
   10225 	sc->phy.release(sc);
   10226 	return rv;
   10227 }
   10228 
   10229 /*
   10230  * wm_gmii_i80003_writereg:	[mii interface function]
   10231  *
   10232  *	Write a PHY register on the kumeran.
    10233  *	Write a PHY register on the kumeran bus (80003).
    10234  * This could be handled by the PHY layer if we didn't have to lock the
    10235  * resource ...
   10236 static void
   10237 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10238 {
   10239 	struct wm_softc *sc = device_private(dev);
   10240 	int page_select, temp;
   10241 
   10242 	if (phy != 1) /* only one PHY on kumeran bus */
   10243 		return;
   10244 
   10245 	if (sc->phy.acquire(sc)) {
   10246 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10247 		return;
   10248 	}
   10249 
   10250 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10251 		page_select = GG82563_PHY_PAGE_SELECT;
   10252 	else {
   10253 		/*
   10254 		 * Use Alternative Page Select register to access registers
   10255 		 * 30 and 31.
   10256 		 */
   10257 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10258 	}
   10259 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10260 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10261 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    10262 		 * Wait 200us more to work around a bug in the ready bit
    10263 		 * of the MDIC register.
   10264 		 * register.
   10265 		 */
   10266 		delay(200);
   10267 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10268 			device_printf(dev, "%s failed\n", __func__);
   10269 			goto out;
   10270 		}
   10271 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10272 		delay(200);
   10273 	} else
   10274 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10275 
   10276 out:
   10277 	sc->phy.release(sc);
   10278 }
   10279 
   10280 /*
   10281  * wm_gmii_bm_readreg:	[mii interface function]
   10282  *
    10283  *	Read a PHY register on the BM PHY (82567).
    10284  * This could be handled by the PHY layer if we didn't have to lock the
    10285  * resource ...
   10286  */
   10287 static int
   10288 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10289 {
   10290 	struct wm_softc *sc = device_private(dev);
   10291 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10292 	uint16_t val;
   10293 	int rv;
   10294 
   10295 	if (sc->phy.acquire(sc)) {
   10296 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10297 		return 0;
   10298 	}
   10299 
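	/*
	 * On BM PHYs, pages 768 and above, register 25 on page 0, and
	 * register 31 are only reachable at PHY address 1.
	 */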
   10300 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10301 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10302 		    || (reg == 31)) ? 1 : phy;
   10303 	/* Page 800 works differently than the rest so it has its own func */
   10304 	if (page == BM_WUC_PAGE) {
   10305 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10306 		rv = val;
   10307 		goto release;
   10308 	}
   10309 
   10310 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10311 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10312 		    && (sc->sc_type != WM_T_82583))
   10313 			wm_gmii_mdic_writereg(dev, phy,
   10314 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10315 		else
   10316 			wm_gmii_mdic_writereg(dev, phy,
   10317 			    BME1000_PHY_PAGE_SELECT, page);
   10318 	}
   10319 
   10320 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10321 
   10322 release:
   10323 	sc->phy.release(sc);
   10324 	return rv;
   10325 }
   10326 
   10327 /*
   10328  * wm_gmii_bm_writereg:	[mii interface function]
   10329  *
    10330  *	Write a PHY register on the BM PHY (82567).
    10331  * This could be handled by the PHY layer if we didn't have to lock the
    10332  * resource ...
   10333  */
   10334 static void
   10335 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10336 {
   10337 	struct wm_softc *sc = device_private(dev);
   10338 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10339 
   10340 	if (sc->phy.acquire(sc)) {
   10341 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10342 		return;
   10343 	}
   10344 
   10345 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10346 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10347 		    || (reg == 31)) ? 1 : phy;
   10348 	/* Page 800 works differently than the rest so it has its own func */
   10349 	if (page == BM_WUC_PAGE) {
   10350 		uint16_t tmp;
   10351 
   10352 		tmp = val;
   10353 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10354 		goto release;
   10355 	}
   10356 
   10357 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10358 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10359 		    && (sc->sc_type != WM_T_82583))
   10360 			wm_gmii_mdic_writereg(dev, phy,
   10361 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10362 		else
   10363 			wm_gmii_mdic_writereg(dev, phy,
   10364 			    BME1000_PHY_PAGE_SELECT, page);
   10365 	}
   10366 
   10367 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10368 
   10369 release:
   10370 	sc->phy.release(sc);
   10371 }
   10372 
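/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  Access is
 *	indirect: enable wakeup register access on page 769, issue the
 *	access through the address and data opcode registers, then
 *	restore the saved WUCE value.
 */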
   10373 static void
    10374 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10375 {
   10376 	struct wm_softc *sc = device_private(dev);
   10377 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10378 	uint16_t wuce, reg;
   10379 
   10380 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10381 		device_xname(dev), __func__));
    10382 	/* XXX Gigabit must be disabled for MDIO accesses to page 800 */
    10383 	if (sc->sc_type == WM_T_PCH) {
    10384 		/* XXX The e1000 driver does nothing here... why? */
    10385 	}
   10386 
   10387 	/*
   10388 	 * 1) Enable PHY wakeup register first.
   10389 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10390 	 */
   10391 
   10392 	/* Set page 769 */
   10393 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10394 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10395 
   10396 	/* Read WUCE and save it */
   10397 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10398 
   10399 	reg = wuce | BM_WUC_ENABLE_BIT;
   10400 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10401 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10402 
   10403 	/* Select page 800 */
   10404 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10405 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10406 
   10407 	/*
   10408 	 * 2) Access PHY wakeup register.
   10409 	 * See e1000_access_phy_wakeup_reg_bm.
   10410 	 */
   10411 
    10412 	/* Select which wakeup register to access */
   10413 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10414 
   10415 	if (rd)
   10416 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10417 	else
   10418 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10419 
   10420 	/*
   10421 	 * 3) Disable PHY wakeup register.
   10422 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10423 	 */
   10424 	/* Set page 769 */
   10425 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10426 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10427 
   10428 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10429 }
   10430 
   10431 /*
   10432  * wm_gmii_hv_readreg:	[mii interface function]
   10433  *
    10434  *	Read a PHY register on the HV PHY (PCH and newer).
    10435  * This could be handled by the PHY layer if we didn't have to lock the
    10436  * resource ...
   10437  */
   10438 static int
   10439 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10440 {
   10441 	struct wm_softc *sc = device_private(dev);
   10442 	int rv;
   10443 
   10444 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10445 		device_xname(dev), __func__));
   10446 	if (sc->phy.acquire(sc)) {
   10447 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10448 		return 0;
   10449 	}
   10450 
   10451 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10452 	sc->phy.release(sc);
   10453 	return rv;
   10454 }
   10455 
   10456 static int
   10457 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10458 {
   10459 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10460 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10461 	uint16_t val;
   10462 	int rv;
   10463 
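	/* Pages at or above HV_INTC_FC_PAGE_START live at PHY address 1. */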
   10464 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10465 
   10466 	/* Page 800 works differently than the rest so it has its own func */
   10467 	if (page == BM_WUC_PAGE) {
   10468 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10469 		return val;
   10470 	}
   10471 
   10472 	/*
    10473 	 * Pages 1 through 767 work differently from the rest and are
    10474 	 * not handled here.
   10475 	 */
   10476 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10477 		printf("gmii_hv_readreg!!!\n");
   10478 		return 0;
   10479 	}
   10480 
   10481 	/*
   10482 	 * XXX I21[789] documents say that the SMBus Address register is at
   10483 	 * PHY address 01, Page 0 (not 768), Register 26.
   10484 	 */
   10485 	if (page == HV_INTC_FC_PAGE_START)
   10486 		page = 0;
   10487 
   10488 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10489 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10490 		    page << BME1000_PAGE_SHIFT);
   10491 	}
   10492 
   10493 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10494 	return rv;
   10495 }
   10496 
   10497 /*
   10498  * wm_gmii_hv_writereg:	[mii interface function]
   10499  *
    10500  *	Write a PHY register on the HV PHY (PCH and newer).
    10501  * This could be handled by the PHY layer if we didn't have to lock the
    10502  * resource ...
   10503  */
   10504 static void
   10505 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10506 {
   10507 	struct wm_softc *sc = device_private(dev);
   10508 
   10509 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10510 		device_xname(dev), __func__));
   10511 
   10512 	if (sc->phy.acquire(sc)) {
   10513 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10514 		return;
   10515 	}
   10516 
   10517 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10518 	sc->phy.release(sc);
   10519 }
   10520 
   10521 static void
   10522 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10523 {
   10524 	struct wm_softc *sc = device_private(dev);
   10525 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10526 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10527 
   10528 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10529 
   10530 	/* Page 800 works differently than the rest so it has its own func */
   10531 	if (page == BM_WUC_PAGE) {
   10532 		uint16_t tmp;
   10533 
   10534 		tmp = val;
   10535 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10536 		return;
   10537 	}
   10538 
   10539 	/*
    10540 	 * Pages 1 through 767 work differently from the rest and are
    10541 	 * not handled here.
   10542 	 */
   10543 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10544 		printf("gmii_hv_writereg!!!\n");
   10545 		return;
   10546 	}
   10547 
   10548 	{
   10549 		/*
   10550 		 * XXX I21[789] documents say that the SMBus Address register
   10551 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10552 		 */
   10553 		if (page == HV_INTC_FC_PAGE_START)
   10554 			page = 0;
   10555 
   10556 		/*
   10557 		 * XXX Workaround MDIO accesses being disabled after entering
   10558 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10559 		 * register is set)
   10560 		 */
   10561 		if (sc->sc_phytype == WMPHY_82578) {
   10562 			struct mii_softc *child;
   10563 
   10564 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10565 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10566 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10567 			    && ((val & (1 << 11)) != 0)) {
   10568 				printf("XXX need workaround\n");
   10569 			}
   10570 		}
   10571 
   10572 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10573 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10574 			    page << BME1000_PAGE_SHIFT);
   10575 		}
   10576 	}
   10577 
   10578 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10579 }
   10580 
   10581 /*
   10582  * wm_gmii_82580_readreg:	[mii interface function]
   10583  *
   10584  *	Read a PHY register on the 82580 and I350.
   10585  * This could be handled by the PHY layer if we didn't have to lock the
    10586  * resource ...
   10587  */
   10588 static int
   10589 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10590 {
   10591 	struct wm_softc *sc = device_private(dev);
   10592 	int rv;
   10593 
   10594 	if (sc->phy.acquire(sc) != 0) {
   10595 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10596 		return 0;
   10597 	}
   10598 
   10599 #ifdef DIAGNOSTIC
   10600 	if (reg > MII_ADDRMASK) {
   10601 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10602 		    __func__, sc->sc_phytype, reg);
   10603 		reg &= MII_ADDRMASK;
   10604 	}
   10605 #endif
   10606 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10607 
   10608 	sc->phy.release(sc);
   10609 	return rv;
   10610 }
   10611 
   10612 /*
   10613  * wm_gmii_82580_writereg:	[mii interface function]
   10614  *
   10615  *	Write a PHY register on the 82580 and I350.
    10616  * resource ...
   10617  * ressource ...
   10618  */
   10619 static void
   10620 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10621 {
   10622 	struct wm_softc *sc = device_private(dev);
   10623 
   10624 	if (sc->phy.acquire(sc) != 0) {
   10625 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10626 		return;
   10627 	}
   10628 
   10629 #ifdef DIAGNOSTIC
   10630 	if (reg > MII_ADDRMASK) {
   10631 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10632 		    __func__, sc->sc_phytype, reg);
   10633 		reg &= MII_ADDRMASK;
   10634 	}
   10635 #endif
   10636 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10637 
   10638 	sc->phy.release(sc);
   10639 }
   10640 
   10641 /*
   10642  * wm_gmii_gs40g_readreg:	[mii interface function]
   10643  *
    10644  *	Read a PHY register on the I210 and I211.
    10645  * This could be handled by the PHY layer if we didn't have to lock the
    10646  * resource ...
   10647  */
   10648 static int
   10649 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10650 {
   10651 	struct wm_softc *sc = device_private(dev);
   10652 	int page, offset;
   10653 	int rv;
   10654 
   10655 	/* Acquire semaphore */
   10656 	if (sc->phy.acquire(sc)) {
   10657 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10658 		return 0;
   10659 	}
   10660 
   10661 	/* Page select */
   10662 	page = reg >> GS40G_PAGE_SHIFT;
   10663 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10664 
   10665 	/* Read reg */
   10666 	offset = reg & GS40G_OFFSET_MASK;
   10667 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10668 
   10669 	sc->phy.release(sc);
   10670 	return rv;
   10671 }
   10672 
   10673 /*
   10674  * wm_gmii_gs40g_writereg:	[mii interface function]
   10675  *
   10676  *	Write a PHY register on the I210 and I211.
   10677  * This could be handled by the PHY layer if we didn't have to lock the
    10678  * resource ...
   10679  */
   10680 static void
   10681 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10682 {
   10683 	struct wm_softc *sc = device_private(dev);
   10684 	int page, offset;
   10685 
   10686 	/* Acquire semaphore */
   10687 	if (sc->phy.acquire(sc)) {
   10688 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10689 		return;
   10690 	}
   10691 
   10692 	/* Page select */
   10693 	page = reg >> GS40G_PAGE_SHIFT;
   10694 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10695 
   10696 	/* Write reg */
   10697 	offset = reg & GS40G_OFFSET_MASK;
   10698 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10699 
   10700 	/* Release semaphore */
   10701 	sc->phy.release(sc);
   10702 }
   10703 
   10704 /*
   10705  * wm_gmii_statchg:	[mii interface function]
   10706  *
   10707  *	Callback from MII layer when media changes.
   10708  */
   10709 static void
   10710 wm_gmii_statchg(struct ifnet *ifp)
   10711 {
   10712 	struct wm_softc *sc = ifp->if_softc;
   10713 	struct mii_data *mii = &sc->sc_mii;
   10714 
   10715 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10716 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10717 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10718 
   10719 	/*
   10720 	 * Get flow control negotiation result.
   10721 	 */
   10722 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10723 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10724 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10725 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10726 	}
   10727 
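	/*
	 * TFCE lets the MAC send pause frames and RFCE lets it act on
	 * received ones; XONE additionally enables XON frame transmission.
	 */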
   10728 	if (sc->sc_flowflags & IFM_FLOW) {
   10729 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10730 			sc->sc_ctrl |= CTRL_TFCE;
   10731 			sc->sc_fcrtl |= FCRTL_XONE;
   10732 		}
   10733 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10734 			sc->sc_ctrl |= CTRL_RFCE;
   10735 	}
   10736 
   10737 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10738 		DPRINTF(WM_DEBUG_LINK,
   10739 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10740 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10741 	} else {
   10742 		DPRINTF(WM_DEBUG_LINK,
   10743 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10744 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10745 	}
   10746 
   10747 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10748 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10749 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10750 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10751 	if (sc->sc_type == WM_T_80003) {
   10752 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10753 		case IFM_1000_T:
   10754 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10755 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10756 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10757 			break;
   10758 		default:
   10759 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10760 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10761 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10762 			break;
   10763 		}
   10764 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10765 	}
   10766 }
   10767 
   10768 /* kumeran related (80003, ICH* and PCH*) */
   10769 
   10770 /*
   10771  * wm_kmrn_readreg:
   10772  *
   10773  *	Read a kumeran register
   10774  */
   10775 static int
   10776 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10777 {
   10778 	int rv;
   10779 
   10780 	if (sc->sc_type == WM_T_80003)
   10781 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10782 	else
   10783 		rv = sc->phy.acquire(sc);
   10784 	if (rv != 0) {
   10785 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10786 		    __func__);
   10787 		return rv;
   10788 	}
   10789 
   10790 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10791 
   10792 	if (sc->sc_type == WM_T_80003)
   10793 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10794 	else
   10795 		sc->phy.release(sc);
   10796 
   10797 	return rv;
   10798 }
   10799 
   10800 static int
   10801 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10802 {
   10803 
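	/*
	 * Write the register offset with the read-enable bit set; the
	 * register contents then show up in the low bits of KUMCTRLSTA.
	 */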
   10804 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10805 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10806 	    KUMCTRLSTA_REN);
   10807 	CSR_WRITE_FLUSH(sc);
   10808 	delay(2);
   10809 
   10810 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10811 
   10812 	return 0;
   10813 }
   10814 
   10815 /*
   10816  * wm_kmrn_writereg:
   10817  *
   10818  *	Write a kumeran register
   10819  */
   10820 static int
   10821 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10822 {
   10823 	int rv;
   10824 
   10825 	if (sc->sc_type == WM_T_80003)
   10826 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10827 	else
   10828 		rv = sc->phy.acquire(sc);
   10829 	if (rv != 0) {
   10830 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10831 		    __func__);
   10832 		return rv;
   10833 	}
   10834 
   10835 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10836 
   10837 	if (sc->sc_type == WM_T_80003)
   10838 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10839 	else
   10840 		sc->phy.release(sc);
   10841 
   10842 	return rv;
   10843 }
   10844 
   10845 static int
   10846 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10847 {
   10848 
   10849 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10850 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10851 
   10852 	return 0;
   10853 }
   10854 
   10855 /* SGMII related */
   10856 
   10857 /*
   10858  * wm_sgmii_uses_mdio
   10859  *
   10860  * Check whether the transaction is to the internal PHY or the external
   10861  * MDIO interface. Return true if it's MDIO.
   10862  */
   10863 static bool
   10864 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10865 {
   10866 	uint32_t reg;
   10867 	bool ismdio = false;
   10868 
   10869 	switch (sc->sc_type) {
   10870 	case WM_T_82575:
   10871 	case WM_T_82576:
   10872 		reg = CSR_READ(sc, WMREG_MDIC);
   10873 		ismdio = ((reg & MDIC_DEST) != 0);
   10874 		break;
   10875 	case WM_T_82580:
   10876 	case WM_T_I350:
   10877 	case WM_T_I354:
   10878 	case WM_T_I210:
   10879 	case WM_T_I211:
   10880 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10881 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10882 		break;
   10883 	default:
   10884 		break;
   10885 	}
   10886 
   10887 	return ismdio;
   10888 }
   10889 
   10890 /*
   10891  * wm_sgmii_readreg:	[mii interface function]
   10892  *
   10893  *	Read a PHY register on the SGMII
   10894  * This could be handled by the PHY layer if we didn't have to lock the
    10895  * resource ...
   10896  */
   10897 static int
   10898 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10899 {
   10900 	struct wm_softc *sc = device_private(dev);
   10901 	uint32_t i2ccmd;
   10902 	int i, rv;
   10903 
   10904 	if (sc->phy.acquire(sc)) {
   10905 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10906 		return 0;
   10907 	}
   10908 
   10909 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10910 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10911 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10912 
   10913 	/* Poll the ready bit */
   10914 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10915 		delay(50);
   10916 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10917 		if (i2ccmd & I2CCMD_READY)
   10918 			break;
   10919 	}
   10920 	if ((i2ccmd & I2CCMD_READY) == 0)
   10921 		device_printf(dev, "I2CCMD Read did not complete\n");
   10922 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10923 		device_printf(dev, "I2CCMD Error bit set\n");
   10924 
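	/* Swap the data bytes back from I2C order to host order. */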
   10925 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10926 
   10927 	sc->phy.release(sc);
   10928 	return rv;
   10929 }
   10930 
   10931 /*
   10932  * wm_sgmii_writereg:	[mii interface function]
   10933  *
   10934  *	Write a PHY register on the SGMII.
   10935  * This could be handled by the PHY layer if we didn't have to lock the
    10936  * resource ...
   10937  */
   10938 static void
   10939 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10940 {
   10941 	struct wm_softc *sc = device_private(dev);
   10942 	uint32_t i2ccmd;
   10943 	int i;
   10944 	int swapdata;
   10945 
   10946 	if (sc->phy.acquire(sc) != 0) {
   10947 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10948 		return;
   10949 	}
   10950 	/* Swap the data bytes for the I2C interface */
   10951 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10952 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10953 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   10954 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10955 
   10956 	/* Poll the ready bit */
   10957 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10958 		delay(50);
   10959 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10960 		if (i2ccmd & I2CCMD_READY)
   10961 			break;
   10962 	}
   10963 	if ((i2ccmd & I2CCMD_READY) == 0)
   10964 		device_printf(dev, "I2CCMD Write did not complete\n");
   10965 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10966 		device_printf(dev, "I2CCMD Error bit set\n");
   10967 
   10968 	sc->phy.release(sc);
   10969 }
   10970 
   10971 /* TBI related */
   10972 
   10973 /*
   10974  * wm_tbi_mediainit:
   10975  *
   10976  *	Initialize media for use on 1000BASE-X devices.
   10977  */
   10978 static void
   10979 wm_tbi_mediainit(struct wm_softc *sc)
   10980 {
   10981 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10982 	const char *sep = "";
   10983 
   10984 	if (sc->sc_type < WM_T_82543)
   10985 		sc->sc_tipg = TIPG_WM_DFLT;
   10986 	else
   10987 		sc->sc_tipg = TIPG_LG_DFLT;
   10988 
   10989 	sc->sc_tbi_serdes_anegticks = 5;
   10990 
   10991 	/* Initialize our media structures */
   10992 	sc->sc_mii.mii_ifp = ifp;
   10993 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10994 
   10995 	if ((sc->sc_type >= WM_T_82575)
   10996 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10997 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10998 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10999 	else
   11000 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11001 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11002 
   11003 	/*
   11004 	 * SWD Pins:
   11005 	 *
   11006 	 *	0 = Link LED (output)
   11007 	 *	1 = Loss Of Signal (input)
   11008 	 */
   11009 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11010 
   11011 	/* XXX Perhaps this is only for TBI */
   11012 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11013 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11014 
   11015 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11016 		sc->sc_ctrl &= ~CTRL_LRST;
   11017 
   11018 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11019 
   11020 #define	ADD(ss, mm, dd)							\
   11021 do {									\
   11022 	aprint_normal("%s%s", sep, ss);					\
   11023 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11024 	sep = ", ";							\
   11025 } while (/*CONSTCOND*/0)
   11026 
   11027 	aprint_normal_dev(sc->sc_dev, "");
   11028 
   11029 	if (sc->sc_type == WM_T_I354) {
   11030 		uint32_t status;
   11031 
   11032 		status = CSR_READ(sc, WMREG_STATUS);
   11033 		if (((status & STATUS_2P5_SKU) != 0)
   11034 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    11035 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    11036 		} else
    11037 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   11038 	} else if (sc->sc_type == WM_T_82545) {
   11039 		/* Only 82545 is LX (XXX except SFP) */
   11040 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11041 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11042 	} else {
   11043 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11044 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11045 	}
   11046 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11047 	aprint_normal("\n");
   11048 
   11049 #undef ADD
   11050 
   11051 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11052 }
   11053 
   11054 /*
   11055  * wm_tbi_mediachange:	[ifmedia interface function]
   11056  *
   11057  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11058  */
   11059 static int
   11060 wm_tbi_mediachange(struct ifnet *ifp)
   11061 {
   11062 	struct wm_softc *sc = ifp->if_softc;
   11063 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11064 	uint32_t status;
   11065 	int i;
   11066 
   11067 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11068 		/* XXX need some work for >= 82571 and < 82575 */
   11069 		if (sc->sc_type < WM_T_82575)
   11070 			return 0;
   11071 	}
   11072 
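	/* Make sure SerDes loopback is disabled before changing the link. */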
   11073 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11074 	    || (sc->sc_type >= WM_T_82575))
   11075 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11076 
   11077 	sc->sc_ctrl &= ~CTRL_LRST;
   11078 	sc->sc_txcw = TXCW_ANE;
   11079 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11080 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11081 	else if (ife->ifm_media & IFM_FDX)
   11082 		sc->sc_txcw |= TXCW_FD;
   11083 	else
   11084 		sc->sc_txcw |= TXCW_HD;
   11085 
   11086 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11087 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11088 
   11089 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11090 		    device_xname(sc->sc_dev), sc->sc_txcw));
   11091 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11092 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11093 	CSR_WRITE_FLUSH(sc);
   11094 	delay(1000);
   11095 
   11096 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   11097 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   11098 
   11099 	/*
    11100 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set if
    11101 	 * the optics detect a signal; on older chips a set bit means no signal.
   11102 	 */
   11103 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   11104 		/* Have signal; wait for the link to come up. */
   11105 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11106 			delay(10000);
   11107 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11108 				break;
   11109 		}
   11110 
   11111 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11112 			    device_xname(sc->sc_dev),i));
   11113 
   11114 		status = CSR_READ(sc, WMREG_STATUS);
   11115 		DPRINTF(WM_DEBUG_LINK,
   11116 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11117 			device_xname(sc->sc_dev),status, STATUS_LU));
   11118 		if (status & STATUS_LU) {
   11119 			/* Link is up. */
   11120 			DPRINTF(WM_DEBUG_LINK,
   11121 			    ("%s: LINK: set media -> link up %s\n",
   11122 			    device_xname(sc->sc_dev),
   11123 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   11124 
   11125 			/*
   11126 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11127 			 * so we should update sc->sc_ctrl
   11128 			 */
   11129 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11130 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11131 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11132 			if (status & STATUS_FD)
   11133 				sc->sc_tctl |=
   11134 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11135 			else
   11136 				sc->sc_tctl |=
   11137 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11138 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11139 				sc->sc_fcrtl |= FCRTL_XONE;
   11140 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11141 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11142 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   11143 				      sc->sc_fcrtl);
   11144 			sc->sc_tbi_linkup = 1;
   11145 		} else {
   11146 			if (i == WM_LINKUP_TIMEOUT)
   11147 				wm_check_for_link(sc);
   11148 			/* Link is down. */
   11149 			DPRINTF(WM_DEBUG_LINK,
   11150 			    ("%s: LINK: set media -> link down\n",
   11151 			    device_xname(sc->sc_dev)));
   11152 			sc->sc_tbi_linkup = 0;
   11153 		}
   11154 	} else {
   11155 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11156 		    device_xname(sc->sc_dev)));
   11157 		sc->sc_tbi_linkup = 0;
   11158 	}
   11159 
   11160 	wm_tbi_serdes_set_linkled(sc);
   11161 
   11162 	return 0;
   11163 }
   11164 
   11165 /*
   11166  * wm_tbi_mediastatus:	[ifmedia interface function]
   11167  *
   11168  *	Get the current interface media status on a 1000BASE-X device.
   11169  */
   11170 static void
   11171 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11172 {
   11173 	struct wm_softc *sc = ifp->if_softc;
   11174 	uint32_t ctrl, status;
   11175 
   11176 	ifmr->ifm_status = IFM_AVALID;
   11177 	ifmr->ifm_active = IFM_ETHER;
   11178 
   11179 	status = CSR_READ(sc, WMREG_STATUS);
   11180 	if ((status & STATUS_LU) == 0) {
   11181 		ifmr->ifm_active |= IFM_NONE;
   11182 		return;
   11183 	}
   11184 
   11185 	ifmr->ifm_status |= IFM_ACTIVE;
   11186 	/* Only 82545 is LX */
   11187 	if (sc->sc_type == WM_T_82545)
   11188 		ifmr->ifm_active |= IFM_1000_LX;
   11189 	else
   11190 		ifmr->ifm_active |= IFM_1000_SX;
   11191 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11192 		ifmr->ifm_active |= IFM_FDX;
   11193 	else
   11194 		ifmr->ifm_active |= IFM_HDX;
   11195 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11196 	if (ctrl & CTRL_RFCE)
   11197 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11198 	if (ctrl & CTRL_TFCE)
   11199 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11200 }
   11201 
   11202 /* XXX TBI only */
   11203 static int
   11204 wm_check_for_link(struct wm_softc *sc)
   11205 {
   11206 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11207 	uint32_t rxcw;
   11208 	uint32_t ctrl;
   11209 	uint32_t status;
   11210 	uint32_t sig;
   11211 
   11212 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11213 		/* XXX need some work for >= 82571 */
   11214 		if (sc->sc_type >= WM_T_82571) {
   11215 			sc->sc_tbi_linkup = 1;
   11216 			return 0;
   11217 		}
   11218 	}
   11219 
   11220 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11221 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11222 	status = CSR_READ(sc, WMREG_STATUS);
   11223 
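	/* Pin state that indicates signal: set on chips newer than the 82544. */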
   11224 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11225 
   11226 	DPRINTF(WM_DEBUG_LINK,
   11227 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11228 		device_xname(sc->sc_dev), __func__,
   11229 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11230 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11231 
   11232 	/*
   11233 	 * SWDPIN   LU RXCW
   11234 	 *      0    0    0
   11235 	 *      0    0    1	(should not happen)
   11236 	 *      0    1    0	(should not happen)
   11237 	 *      0    1    1	(should not happen)
   11238 	 *      1    0    0	Disable autonego and force linkup
   11239 	 *      1    0    1	got /C/ but not linkup yet
   11240 	 *      1    1    0	(linkup)
   11241 	 *      1    1    1	If IFM_AUTO, back to autonego
   11242 	 *
   11243 	 */
   11244 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11245 	    && ((status & STATUS_LU) == 0)
   11246 	    && ((rxcw & RXCW_C) == 0)) {
   11247 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11248 			__func__));
   11249 		sc->sc_tbi_linkup = 0;
   11250 		/* Disable auto-negotiation in the TXCW register */
   11251 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11252 
   11253 		/*
   11254 		 * Force link-up and also force full-duplex.
   11255 		 *
    11256 		 * NOTE: the hardware updates CTRL's TFCE and RFCE bits
    11257 		 * automatically, so refresh sc->sc_ctrl from the value read.
   11258 		 */
   11259 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11260 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11261 	} else if (((status & STATUS_LU) != 0)
   11262 	    && ((rxcw & RXCW_C) != 0)
   11263 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11264 		sc->sc_tbi_linkup = 1;
   11265 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11266 			__func__));
   11267 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11268 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11269 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11270 	    && ((rxcw & RXCW_C) != 0)) {
   11271 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11272 	} else {
   11273 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11274 			status));
   11275 	}
   11276 
   11277 	return 0;
   11278 }
   11279 
   11280 /*
   11281  * wm_tbi_tick:
   11282  *
   11283  *	Check the link on TBI devices.
   11284  *	This function acts as mii_tick().
   11285  */
   11286 static void
   11287 wm_tbi_tick(struct wm_softc *sc)
   11288 {
   11289 	struct mii_data *mii = &sc->sc_mii;
   11290 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11291 	uint32_t status;
   11292 
   11293 	KASSERT(WM_CORE_LOCKED(sc));
   11294 
   11295 	status = CSR_READ(sc, WMREG_STATUS);
   11296 
   11297 	/* XXX is this needed? */
   11298 	(void)CSR_READ(sc, WMREG_RXCW);
   11299 	(void)CSR_READ(sc, WMREG_CTRL);
   11300 
   11301 	/* set link status */
   11302 	if ((status & STATUS_LU) == 0) {
   11303 		DPRINTF(WM_DEBUG_LINK,
   11304 		    ("%s: LINK: checklink -> down\n",
   11305 			device_xname(sc->sc_dev)));
   11306 		sc->sc_tbi_linkup = 0;
   11307 	} else if (sc->sc_tbi_linkup == 0) {
   11308 		DPRINTF(WM_DEBUG_LINK,
   11309 		    ("%s: LINK: checklink -> up %s\n",
   11310 			device_xname(sc->sc_dev),
   11311 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11312 		sc->sc_tbi_linkup = 1;
   11313 		sc->sc_tbi_serdes_ticks = 0;
   11314 	}
   11315 
   11316 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11317 		goto setled;
   11318 
   11319 	if ((status & STATUS_LU) == 0) {
   11320 		sc->sc_tbi_linkup = 0;
   11321 		/* If the timer expired, retry autonegotiation */
   11322 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11323 		    && (++sc->sc_tbi_serdes_ticks
   11324 			>= sc->sc_tbi_serdes_anegticks)) {
   11325 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11326 			sc->sc_tbi_serdes_ticks = 0;
   11327 			/*
   11328 			 * Reset the link, and let autonegotiation do
   11329 			 * its thing
   11330 			 */
   11331 			sc->sc_ctrl |= CTRL_LRST;
   11332 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11333 			CSR_WRITE_FLUSH(sc);
   11334 			delay(1000);
   11335 			sc->sc_ctrl &= ~CTRL_LRST;
   11336 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11337 			CSR_WRITE_FLUSH(sc);
   11338 			delay(1000);
   11339 			CSR_WRITE(sc, WMREG_TXCW,
   11340 			    sc->sc_txcw & ~TXCW_ANE);
   11341 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11342 		}
   11343 	}
   11344 
   11345 setled:
   11346 	wm_tbi_serdes_set_linkled(sc);
   11347 }
   11348 
   11349 /* SERDES related */
   11350 static void
   11351 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11352 {
   11353 	uint32_t reg;
   11354 
   11355 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11356 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11357 		return;
   11358 
   11359 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11360 	reg |= PCS_CFG_PCS_EN;
   11361 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11362 
   11363 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11364 	reg &= ~CTRL_EXT_SWDPIN(3);
   11365 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11366 	CSR_WRITE_FLUSH(sc);
   11367 }
   11368 
   11369 static int
   11370 wm_serdes_mediachange(struct ifnet *ifp)
   11371 {
   11372 	struct wm_softc *sc = ifp->if_softc;
   11373 	bool pcs_autoneg = true; /* XXX */
   11374 	uint32_t ctrl_ext, pcs_lctl, reg;
   11375 
   11376 	/* XXX Currently, this function is not called on 8257[12] */
   11377 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11378 	    || (sc->sc_type >= WM_T_82575))
   11379 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11380 
   11381 	wm_serdes_power_up_link_82575(sc);
   11382 
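          	/* Force the link up (SLU); speed/duplex are resolved below. */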
   11383 	sc->sc_ctrl |= CTRL_SLU;
   11384 
   11385 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11386 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11387 
   11388 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11389 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11390 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11391 	case CTRL_EXT_LINK_MODE_SGMII:
   11392 		pcs_autoneg = true;
   11393 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11394 		break;
   11395 	case CTRL_EXT_LINK_MODE_1000KX:
   11396 		pcs_autoneg = false;
   11397 		/* FALLTHROUGH */
   11398 	default:
   11399 		if ((sc->sc_type == WM_T_82575)
   11400 		    || (sc->sc_type == WM_T_82576)) {
   11401 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11402 				pcs_autoneg = false;
   11403 		}
   11404 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11405 		    | CTRL_FRCFDX;
   11406 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11407 	}
   11408 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11409 
   11410 	if (pcs_autoneg) {
   11411 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11412 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11413 
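          		/* Advertise both symmetric and asymmetric PAUSE. */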
   11414 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11415 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11416 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11417 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11418 	} else
   11419 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11420 
   11421 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11422 
   11424 	return 0;
   11425 }
   11426 
   11427 static void
   11428 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11429 {
   11430 	struct wm_softc *sc = ifp->if_softc;
   11431 	struct mii_data *mii = &sc->sc_mii;
   11432 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11433 	uint32_t pcs_adv, pcs_lpab, reg;
   11434 
   11435 	ifmr->ifm_status = IFM_AVALID;
   11436 	ifmr->ifm_active = IFM_ETHER;
   11437 
   11438 	/* Check PCS */
   11439 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11440 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11441 		ifmr->ifm_active |= IFM_NONE;
   11442 		sc->sc_tbi_linkup = 0;
   11443 		goto setled;
   11444 	}
   11445 
   11446 	sc->sc_tbi_linkup = 1;
   11447 	ifmr->ifm_status |= IFM_ACTIVE;
   11448 	if (sc->sc_type == WM_T_I354) {
   11449 		uint32_t status;
   11450 
   11451 		status = CSR_READ(sc, WMREG_STATUS);
   11452 		if (((status & STATUS_2P5_SKU) != 0)
   11453 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11454 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11455 		} else
   11456 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11457 	} else {
   11458 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11459 		case PCS_LSTS_SPEED_10:
   11460 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11461 			break;
   11462 		case PCS_LSTS_SPEED_100:
   11463 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11464 			break;
   11465 		case PCS_LSTS_SPEED_1000:
   11466 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11467 			break;
   11468 		default:
   11469 			device_printf(sc->sc_dev, "Unknown speed\n");
   11470 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11471 			break;
   11472 		}
   11473 	}
   11474 	if ((reg & PCS_LSTS_FDX) != 0)
   11475 		ifmr->ifm_active |= IFM_FDX;
   11476 	else
   11477 		ifmr->ifm_active |= IFM_HDX;
   11478 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11479 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11480 		/* Check flow */
   11481 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11482 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11483 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11484 			goto setled;
   11485 		}
   11486 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11487 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11488 		DPRINTF(WM_DEBUG_LINK,
   11489 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
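          		/*
          		 * Resolve flow control as in IEEE 802.3 Annex 28B:
          		 * symmetric PAUSE if both sides advertise SYM, otherwise
          		 * TX-only or RX-only PAUSE depending on the ASYM bits.
          		 */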
   11490 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11491 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11492 			mii->mii_media_active |= IFM_FLOW
   11493 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11494 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11495 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11496 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11497 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11498 			mii->mii_media_active |= IFM_FLOW
   11499 			    | IFM_ETH_TXPAUSE;
   11500 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11501 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11502 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11503 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11504 			mii->mii_media_active |= IFM_FLOW
   11505 			    | IFM_ETH_RXPAUSE;
   11506 		}
   11507 	}
   11508 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11509 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11510 setled:
   11511 	wm_tbi_serdes_set_linkled(sc);
   11512 }
   11513 
   11514 /*
   11515  * wm_serdes_tick:
   11516  *
   11517  *	Check the link on serdes devices.
   11518  */
   11519 static void
   11520 wm_serdes_tick(struct wm_softc *sc)
   11521 {
   11522 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11523 	struct mii_data *mii = &sc->sc_mii;
   11524 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11525 	uint32_t reg;
   11526 
   11527 	KASSERT(WM_CORE_LOCKED(sc));
   11528 
   11529 	mii->mii_media_status = IFM_AVALID;
   11530 	mii->mii_media_active = IFM_ETHER;
   11531 
   11532 	/* Check PCS */
   11533 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11534 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11535 		mii->mii_media_status |= IFM_ACTIVE;
   11536 		sc->sc_tbi_linkup = 1;
   11537 		sc->sc_tbi_serdes_ticks = 0;
   11538 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11539 		if ((reg & PCS_LSTS_FDX) != 0)
   11540 			mii->mii_media_active |= IFM_FDX;
   11541 		else
   11542 			mii->mii_media_active |= IFM_HDX;
   11543 	} else {
   11544 		mii->mii_media_status |= IFM_NONE;
   11545 		sc->sc_tbi_linkup = 0;
   11546 		/* If the timer expired, retry autonegotiation */
   11547 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11548 		    && (++sc->sc_tbi_serdes_ticks
   11549 			>= sc->sc_tbi_serdes_anegticks)) {
   11550 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11551 			sc->sc_tbi_serdes_ticks = 0;
   11552 			/* XXX */
   11553 			wm_serdes_mediachange(ifp);
   11554 		}
   11555 	}
   11556 
   11557 	wm_tbi_serdes_set_linkled(sc);
   11558 }
   11559 
   11560 /* SFP related */
   11561 
   11562 static int
   11563 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11564 {
   11565 	uint32_t i2ccmd;
   11566 	int i;
   11567 
   11568 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11569 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11570 
   11571 	/* Poll the ready bit */
   11572 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11573 		delay(50);
   11574 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11575 		if (i2ccmd & I2CCMD_READY)
   11576 			break;
   11577 	}
   11578 	if ((i2ccmd & I2CCMD_READY) == 0)
   11579 		return -1;
   11580 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11581 		return -1;
   11582 
   11583 	*data = i2ccmd & 0x00ff;
   11584 
   11585 	return 0;
   11586 }
   11587 
   11588 static uint32_t
   11589 wm_sfp_get_media_type(struct wm_softc *sc)
   11590 {
   11591 	uint32_t ctrl_ext;
   11592 	uint8_t val = 0;
   11593 	int timeout = 3;
   11594 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11595 	int rv = -1;
   11596 
   11597 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11598 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11599 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11600 	CSR_WRITE_FLUSH(sc);
   11601 
   11602 	/* Read SFP module data */
   11603 	while (timeout) {
   11604 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11605 		if (rv == 0)
   11606 			break;
   11607 		delay(100*1000); /* XXX too big */
   11608 		timeout--;
   11609 	}
   11610 	if (rv != 0)
   11611 		goto out;
   11612 	switch (val) {
   11613 	case SFF_SFP_ID_SFF:
   11614 		aprint_normal_dev(sc->sc_dev,
   11615 		    "Module/Connector soldered to board\n");
   11616 		break;
   11617 	case SFF_SFP_ID_SFP:
   11618 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11619 		break;
   11620 	case SFF_SFP_ID_UNKNOWN:
   11621 		goto out;
   11622 	default:
   11623 		break;
   11624 	}
   11625 
   11626 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11627 	if (rv != 0) {
   11628 		goto out;
   11629 	}
   11630 
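          	/*
          	 * Map the SFP Ethernet compliance codes to a media type:
          	 * 1000BASE-SX/LX -> SERDES, 1000BASE-T -> copper over SGMII,
          	 * 100BASE-FX -> SERDES over SGMII.
          	 */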
   11631 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11632 		mediatype = WM_MEDIATYPE_SERDES;
   11633 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11634 		sc->sc_flags |= WM_F_SGMII;
   11635 		mediatype = WM_MEDIATYPE_COPPER;
   11636 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11637 		sc->sc_flags |= WM_F_SGMII;
   11638 		mediatype = WM_MEDIATYPE_SERDES;
   11639 	}
   11640 
   11641 out:
   11642 	/* Restore I2C interface setting */
   11643 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11644 
   11645 	return mediatype;
   11646 }
   11647 
   11648 /*
   11649  * NVM related.
   11650  * Microwire, SPI (w/wo EERD) and Flash.
   11651  */
   11652 
   11653 /* Both spi and uwire */
   11654 
   11655 /*
   11656  * wm_eeprom_sendbits:
   11657  *
   11658  *	Send a series of bits to the EEPROM.
   11659  */
   11660 static void
   11661 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11662 {
   11663 	uint32_t reg;
   11664 	int x;
   11665 
   11666 	reg = CSR_READ(sc, WMREG_EECD);
   11667 
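          	/* Clock out each bit on DI, MSB first, pulsing SK once per bit. */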
   11668 	for (x = nbits; x > 0; x--) {
   11669 		if (bits & (1U << (x - 1)))
   11670 			reg |= EECD_DI;
   11671 		else
   11672 			reg &= ~EECD_DI;
   11673 		CSR_WRITE(sc, WMREG_EECD, reg);
   11674 		CSR_WRITE_FLUSH(sc);
   11675 		delay(2);
   11676 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11677 		CSR_WRITE_FLUSH(sc);
   11678 		delay(2);
   11679 		CSR_WRITE(sc, WMREG_EECD, reg);
   11680 		CSR_WRITE_FLUSH(sc);
   11681 		delay(2);
   11682 	}
   11683 }
   11684 
   11685 /*
   11686  * wm_eeprom_recvbits:
   11687  *
   11688  *	Receive a series of bits from the EEPROM.
   11689  */
   11690 static void
   11691 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11692 {
   11693 	uint32_t reg, val;
   11694 	int x;
   11695 
   11696 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11697 
   11698 	val = 0;
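          	/* Clock in each bit from DO, MSB first: raise SK, sample, lower SK. */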
   11699 	for (x = nbits; x > 0; x--) {
   11700 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11701 		CSR_WRITE_FLUSH(sc);
   11702 		delay(2);
   11703 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11704 			val |= (1U << (x - 1));
   11705 		CSR_WRITE(sc, WMREG_EECD, reg);
   11706 		CSR_WRITE_FLUSH(sc);
   11707 		delay(2);
   11708 	}
   11709 	*valp = val;
   11710 }
   11711 
   11712 /* Microwire */
   11713 
   11714 /*
   11715  * wm_nvm_read_uwire:
   11716  *
   11717  *	Read a word from the EEPROM using the MicroWire protocol.
   11718  */
   11719 static int
   11720 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11721 {
   11722 	uint32_t reg, val;
   11723 	int i;
   11724 
   11725 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11726 		device_xname(sc->sc_dev), __func__));
   11727 
   11728 	if (sc->nvm.acquire(sc) != 0)
   11729 		return -1;
   11730 
   11731 	for (i = 0; i < wordcnt; i++) {
   11732 		/* Clear SK and DI. */
   11733 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11734 		CSR_WRITE(sc, WMREG_EECD, reg);
   11735 
   11736 		/*
   11737 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11738 		 * and Xen.
   11739 		 *
   11740 		 * We use this workaround only for 82540 because qemu's
    11741 		 * e1000 acts as an 82540.
   11742 		 */
   11743 		if (sc->sc_type == WM_T_82540) {
   11744 			reg |= EECD_SK;
   11745 			CSR_WRITE(sc, WMREG_EECD, reg);
   11746 			reg &= ~EECD_SK;
   11747 			CSR_WRITE(sc, WMREG_EECD, reg);
   11748 			CSR_WRITE_FLUSH(sc);
   11749 			delay(2);
   11750 		}
   11751 		/* XXX: end of workaround */
   11752 
   11753 		/* Set CHIP SELECT. */
   11754 		reg |= EECD_CS;
   11755 		CSR_WRITE(sc, WMREG_EECD, reg);
   11756 		CSR_WRITE_FLUSH(sc);
   11757 		delay(2);
   11758 
   11759 		/* Shift in the READ command. */
   11760 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11761 
   11762 		/* Shift in address. */
   11763 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11764 
   11765 		/* Shift out the data. */
   11766 		wm_eeprom_recvbits(sc, &val, 16);
   11767 		data[i] = val & 0xffff;
   11768 
   11769 		/* Clear CHIP SELECT. */
   11770 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11771 		CSR_WRITE(sc, WMREG_EECD, reg);
   11772 		CSR_WRITE_FLUSH(sc);
   11773 		delay(2);
   11774 	}
   11775 
   11776 	sc->nvm.release(sc);
   11777 	return 0;
   11778 }
   11779 
   11780 /* SPI */
   11781 
   11782 /*
   11783  * Set SPI and FLASH related information from the EECD register.
   11784  * For 82541 and 82547, the word size is taken from EEPROM.
   11785  */
   11786 static int
   11787 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11788 {
   11789 	int size;
   11790 	uint32_t reg;
   11791 	uint16_t data;
   11792 
   11793 	reg = CSR_READ(sc, WMREG_EECD);
   11794 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11795 
   11796 	/* Read the size of NVM from EECD by default */
   11797 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11798 	switch (sc->sc_type) {
   11799 	case WM_T_82541:
   11800 	case WM_T_82541_2:
   11801 	case WM_T_82547:
   11802 	case WM_T_82547_2:
   11803 		/* Set dummy value to access EEPROM */
   11804 		sc->sc_nvm_wordsize = 64;
   11805 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11806 			aprint_error_dev(sc->sc_dev,
   11807 			    "%s: failed to read EEPROM size\n", __func__);
   11808 		}
   11809 		reg = data;
   11810 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11811 		if (size == 0)
   11812 			size = 6; /* 64 word size */
   11813 		else
   11814 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11815 		break;
   11816 	case WM_T_80003:
   11817 	case WM_T_82571:
   11818 	case WM_T_82572:
   11819 	case WM_T_82573: /* SPI case */
   11820 	case WM_T_82574: /* SPI case */
   11821 	case WM_T_82583: /* SPI case */
   11822 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11823 		if (size > 14)
   11824 			size = 14;
   11825 		break;
   11826 	case WM_T_82575:
   11827 	case WM_T_82576:
   11828 	case WM_T_82580:
   11829 	case WM_T_I350:
   11830 	case WM_T_I354:
   11831 	case WM_T_I210:
   11832 	case WM_T_I211:
   11833 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11834 		if (size > 15)
   11835 			size = 15;
   11836 		break;
   11837 	default:
   11838 		aprint_error_dev(sc->sc_dev,
    11839 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
    11840 		return -1;
   11842 	}
   11843 
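          	/* "size" is log2 of the NVM word count. */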
   11844 	sc->sc_nvm_wordsize = 1 << size;
   11845 
   11846 	return 0;
   11847 }
   11848 
   11849 /*
   11850  * wm_nvm_ready_spi:
   11851  *
   11852  *	Wait for a SPI EEPROM to be ready for commands.
   11853  */
   11854 static int
   11855 wm_nvm_ready_spi(struct wm_softc *sc)
   11856 {
   11857 	uint32_t val;
   11858 	int usec;
   11859 
   11860 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11861 		device_xname(sc->sc_dev), __func__));
   11862 
   11863 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11864 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11865 		wm_eeprom_recvbits(sc, &val, 8);
   11866 		if ((val & SPI_SR_RDY) == 0)
   11867 			break;
   11868 	}
   11869 	if (usec >= SPI_MAX_RETRIES) {
    11870 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   11871 		return -1;
   11872 	}
   11873 	return 0;
   11874 }
   11875 
   11876 /*
   11877  * wm_nvm_read_spi:
   11878  *
    11879  *	Read a word from the EEPROM using the SPI protocol.
   11880  */
   11881 static int
   11882 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11883 {
   11884 	uint32_t reg, val;
   11885 	int i;
   11886 	uint8_t opc;
   11887 	int rv = 0;
   11888 
   11889 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11890 		device_xname(sc->sc_dev), __func__));
   11891 
   11892 	if (sc->nvm.acquire(sc) != 0)
   11893 		return -1;
   11894 
   11895 	/* Clear SK and CS. */
   11896 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11897 	CSR_WRITE(sc, WMREG_EECD, reg);
   11898 	CSR_WRITE_FLUSH(sc);
   11899 	delay(2);
   11900 
   11901 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11902 		goto out;
   11903 
   11904 	/* Toggle CS to flush commands. */
   11905 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11906 	CSR_WRITE_FLUSH(sc);
   11907 	delay(2);
   11908 	CSR_WRITE(sc, WMREG_EECD, reg);
   11909 	CSR_WRITE_FLUSH(sc);
   11910 	delay(2);
   11911 
   11912 	opc = SPI_OPC_READ;
   11913 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11914 		opc |= SPI_OPC_A8;
   11915 
   11916 	wm_eeprom_sendbits(sc, opc, 8);
   11917 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11918 
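          	/* The EEPROM shifts each word out MSB first; swap the bytes. */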
   11919 	for (i = 0; i < wordcnt; i++) {
   11920 		wm_eeprom_recvbits(sc, &val, 16);
   11921 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11922 	}
   11923 
   11924 	/* Raise CS and clear SK. */
   11925 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11926 	CSR_WRITE(sc, WMREG_EECD, reg);
   11927 	CSR_WRITE_FLUSH(sc);
   11928 	delay(2);
   11929 
   11930 out:
   11931 	sc->nvm.release(sc);
   11932 	return rv;
   11933 }
   11934 
   11935 /* Using with EERD */
   11936 
   11937 static int
   11938 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11939 {
   11940 	uint32_t attempts = 100000;
   11941 	uint32_t i, reg = 0;
   11942 	int32_t done = -1;
   11943 
   11944 	for (i = 0; i < attempts; i++) {
   11945 		reg = CSR_READ(sc, rw);
   11946 
   11947 		if (reg & EERD_DONE) {
   11948 			done = 0;
   11949 			break;
   11950 		}
   11951 		delay(5);
   11952 	}
   11953 
   11954 	return done;
   11955 }
   11956 
   11957 static int
   11958 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   11959 {
   11960 	int i, eerd = 0;
   11961 	int rv = 0;
   11962 
   11963 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11964 		device_xname(sc->sc_dev), __func__));
   11965 
   11966 	if (sc->nvm.acquire(sc) != 0)
   11967 		return -1;
   11968 
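          	/* Per word: set the address, start the read, poll DONE, read data. */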
   11969 	for (i = 0; i < wordcnt; i++) {
   11970 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11971 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11972 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11973 		if (rv != 0) {
   11974 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   11975 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   11976 			break;
   11977 		}
   11978 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11979 	}
   11980 
   11981 	sc->nvm.release(sc);
   11982 	return rv;
   11983 }
   11984 
   11985 /* Flash */
   11986 
   11987 static int
   11988 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11989 {
   11990 	uint32_t eecd;
   11991 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11992 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11993 	uint32_t nvm_dword = 0;
   11994 	uint8_t sig_byte = 0;
    11995 	int rv;
   11996 
   11997 	switch (sc->sc_type) {
   11998 	case WM_T_PCH_SPT:
   11999 	case WM_T_PCH_CNP:
   12000 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12001 		act_offset = ICH_NVM_SIG_WORD * 2;
   12002 
   12003 		/* set bank to 0 in case flash read fails. */
   12004 		*bank = 0;
   12005 
   12006 		/* Check bank 0 */
   12007 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12008 		if (rv != 0)
   12009 			return rv;
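          		/* The signature is in the high byte of the signature word. */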
   12010 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12011 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12012 			*bank = 0;
   12013 			return 0;
   12014 		}
   12015 
   12016 		/* Check bank 1 */
    12017 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12018 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    12019 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12020 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12021 			*bank = 1;
   12022 			return 0;
   12023 		}
   12024 		aprint_error_dev(sc->sc_dev,
   12025 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12026 		return -1;
   12027 	case WM_T_ICH8:
   12028 	case WM_T_ICH9:
   12029 		eecd = CSR_READ(sc, WMREG_EECD);
   12030 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12031 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12032 			return 0;
   12033 		}
   12034 		/* FALLTHROUGH */
   12035 	default:
   12036 		/* Default to 0 */
   12037 		*bank = 0;
   12038 
   12039 		/* Check bank 0 */
   12040 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12041 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12042 			*bank = 0;
   12043 			return 0;
   12044 		}
   12045 
   12046 		/* Check bank 1 */
   12047 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12048 		    &sig_byte);
   12049 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12050 			*bank = 1;
   12051 			return 0;
   12052 		}
   12053 	}
   12054 
   12055 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12056 		device_xname(sc->sc_dev)));
   12057 	return -1;
   12058 }
   12059 
   12060 /******************************************************************************
   12061  * This function does initial flash setup so that a new read/write/erase cycle
   12062  * can be started.
   12063  *
   12064  * sc - The pointer to the hw structure
   12065  ****************************************************************************/
   12066 static int32_t
   12067 wm_ich8_cycle_init(struct wm_softc *sc)
   12068 {
   12069 	uint16_t hsfsts;
   12070 	int32_t error = 1;
   12071 	int32_t i     = 0;
   12072 
   12073 	if (sc->sc_type >= WM_T_PCH_SPT)
   12074 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12075 	else
   12076 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12077 
    12078 	/* Check the Flash Descriptor Valid bit in HW status */
   12079 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   12080 		return error;
   12081 	}
   12082 
    12083 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   12085 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12086 
   12087 	if (sc->sc_type >= WM_T_PCH_SPT)
   12088 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12089 	else
   12090 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12091 
   12092 	/*
    12093 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12094 	 * check against in order to start a new cycle, or the FDONE bit
    12095 	 * should be changed in the hardware so that it reads 1 after a
    12096 	 * hardware reset, which could then indicate whether a cycle is in
    12097 	 * progress or has completed. We should also have some software
    12098 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    12099 	 * so that accesses by two threads are serialized, or some other
    12100 	 * way to keep two threads from starting a cycle at the same time.
   12101 	 */
   12102 
   12103 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12104 		/*
   12105 		 * There is no cycle running at present, so we can start a
   12106 		 * cycle
   12107 		 */
   12108 
   12109 		/* Begin by setting Flash Cycle Done. */
   12110 		hsfsts |= HSFSTS_DONE;
   12111 		if (sc->sc_type >= WM_T_PCH_SPT)
   12112 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12113 			    hsfsts & 0xffffUL);
   12114 		else
   12115 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12116 		error = 0;
   12117 	} else {
   12118 		/*
    12119 		 * Otherwise, poll for some time so the current cycle has a
    12120 		 * chance to end before giving up.
   12121 		 */
   12122 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12123 			if (sc->sc_type >= WM_T_PCH_SPT)
   12124 				hsfsts = ICH8_FLASH_READ32(sc,
   12125 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12126 			else
   12127 				hsfsts = ICH8_FLASH_READ16(sc,
   12128 				    ICH_FLASH_HSFSTS);
   12129 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12130 				error = 0;
   12131 				break;
   12132 			}
   12133 			delay(1);
   12134 		}
   12135 		if (error == 0) {
   12136 			/*
    12137 			 * The previous cycle ended within the timeout;
    12138 			 * now set the Flash Cycle Done bit.
   12139 			 */
   12140 			hsfsts |= HSFSTS_DONE;
   12141 			if (sc->sc_type >= WM_T_PCH_SPT)
   12142 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12143 				    hsfsts & 0xffffUL);
   12144 			else
   12145 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12146 				    hsfsts);
   12147 		}
   12148 	}
   12149 	return error;
   12150 }
   12151 
   12152 /******************************************************************************
   12153  * This function starts a flash cycle and waits for its completion
   12154  *
   12155  * sc - The pointer to the hw structure
   12156  ****************************************************************************/
   12157 static int32_t
   12158 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12159 {
   12160 	uint16_t hsflctl;
   12161 	uint16_t hsfsts;
   12162 	int32_t error = 1;
   12163 	uint32_t i = 0;
   12164 
   12165 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12166 	if (sc->sc_type >= WM_T_PCH_SPT)
   12167 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12168 	else
   12169 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12170 	hsflctl |= HSFCTL_GO;
   12171 	if (sc->sc_type >= WM_T_PCH_SPT)
   12172 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12173 		    (uint32_t)hsflctl << 16);
   12174 	else
   12175 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12176 
   12177 	/* Wait till FDONE bit is set to 1 */
   12178 	do {
   12179 		if (sc->sc_type >= WM_T_PCH_SPT)
   12180 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12181 			    & 0xffffUL;
   12182 		else
   12183 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12184 		if (hsfsts & HSFSTS_DONE)
   12185 			break;
   12186 		delay(1);
   12187 		i++;
   12188 	} while (i < timeout);
   12189 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   12190 		error = 0;
   12191 
   12192 	return error;
   12193 }
   12194 
   12195 /******************************************************************************
   12196  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12197  *
   12198  * sc - The pointer to the hw structure
   12199  * index - The index of the byte or word to read.
   12200  * size - Size of data to read, 1=byte 2=word, 4=dword
   12201  * data - Pointer to the word to store the value read.
   12202  *****************************************************************************/
   12203 static int32_t
   12204 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12205     uint32_t size, uint32_t *data)
   12206 {
   12207 	uint16_t hsfsts;
   12208 	uint16_t hsflctl;
   12209 	uint32_t flash_linear_address;
   12210 	uint32_t flash_data = 0;
   12211 	int32_t error = 1;
   12212 	int32_t count = 0;
   12213 
    12214 	if (size < 1 || size > 4 || data == NULL ||
    12215 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12216 		return error;
   12217 
   12218 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12219 	    sc->sc_ich8_flash_base;
   12220 
   12221 	do {
   12222 		delay(1);
   12223 		/* Steps */
   12224 		error = wm_ich8_cycle_init(sc);
   12225 		if (error)
   12226 			break;
   12227 
   12228 		if (sc->sc_type >= WM_T_PCH_SPT)
   12229 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12230 			    >> 16;
   12231 		else
   12232 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12233 		/* BCOUNT is the transfer size in bytes, minus one. */
   12234 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12235 		    & HSFCTL_BCOUNT_MASK;
   12236 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12237 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12238 			/*
    12239 			 * In SPT, this register is in LAN memory space, not
   12240 			 * flash. Therefore, only 32 bit access is supported.
   12241 			 */
   12242 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12243 			    (uint32_t)hsflctl << 16);
   12244 		} else
   12245 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12246 
   12247 		/*
   12248 		 * Write the last 24 bits of index into Flash Linear address
   12249 		 * field in Flash Address
   12250 		 */
   12251 		/* TODO: TBD maybe check the index against the size of flash */
   12252 
   12253 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12254 
   12255 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12256 
   12257 		/*
    12258 		 * If FCERR is set, clear it and retry the whole sequence
    12259 		 * a few more times. Otherwise read in (shift in) the
    12260 		 * Flash Data0 register; the data arrives least
    12261 		 * significant byte first.
   12262 		 */
   12263 		if (error == 0) {
   12264 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12265 			if (size == 1)
   12266 				*data = (uint8_t)(flash_data & 0x000000FF);
   12267 			else if (size == 2)
   12268 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12269 			else if (size == 4)
   12270 				*data = (uint32_t)flash_data;
   12271 			break;
   12272 		} else {
   12273 			/*
   12274 			 * If we've gotten here, then things are probably
   12275 			 * completely hosed, but if the error condition is
   12276 			 * detected, it won't hurt to give it another try...
   12277 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12278 			 */
   12279 			if (sc->sc_type >= WM_T_PCH_SPT)
   12280 				hsfsts = ICH8_FLASH_READ32(sc,
   12281 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12282 			else
   12283 				hsfsts = ICH8_FLASH_READ16(sc,
   12284 				    ICH_FLASH_HSFSTS);
   12285 
   12286 			if (hsfsts & HSFSTS_ERR) {
   12287 				/* Repeat for some time before giving up. */
   12288 				continue;
   12289 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12290 				break;
   12291 		}
   12292 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12293 
   12294 	return error;
   12295 }
   12296 
   12297 /******************************************************************************
   12298  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12299  *
   12300  * sc - pointer to wm_hw structure
   12301  * index - The index of the byte to read.
   12302  * data - Pointer to a byte to store the value read.
   12303  *****************************************************************************/
   12304 static int32_t
   12305 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12306 {
   12307 	int32_t status;
   12308 	uint32_t word = 0;
   12309 
   12310 	status = wm_read_ich8_data(sc, index, 1, &word);
   12311 	if (status == 0)
   12312 		*data = (uint8_t)word;
   12313 	else
   12314 		*data = 0;
   12315 
   12316 	return status;
   12317 }
   12318 
   12319 /******************************************************************************
   12320  * Reads a word from the NVM using the ICH8 flash access registers.
   12321  *
   12322  * sc - pointer to wm_hw structure
   12323  * index - The starting byte index of the word to read.
   12324  * data - Pointer to a word to store the value read.
   12325  *****************************************************************************/
   12326 static int32_t
   12327 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12328 {
   12329 	int32_t status;
   12330 	uint32_t word = 0;
   12331 
   12332 	status = wm_read_ich8_data(sc, index, 2, &word);
   12333 	if (status == 0)
   12334 		*data = (uint16_t)word;
   12335 	else
   12336 		*data = 0;
   12337 
   12338 	return status;
   12339 }
   12340 
   12341 /******************************************************************************
   12342  * Reads a dword from the NVM using the ICH8 flash access registers.
   12343  *
   12344  * sc - pointer to wm_hw structure
    12345  * index - The starting byte index of the dword to read.
    12346  * data - Pointer to a dword to store the value read.
   12347  *****************************************************************************/
   12348 static int32_t
   12349 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12350 {
   12351 	int32_t status;
   12352 
   12353 	status = wm_read_ich8_data(sc, index, 4, data);
   12354 	return status;
   12355 }
   12356 
   12357 /******************************************************************************
   12358  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12359  * register.
   12360  *
   12361  * sc - Struct containing variables accessed by shared code
   12362  * offset - offset of word in the EEPROM to read
    12363  * words - number of words to read
    12364  * data - words read from the EEPROM
   12365  *****************************************************************************/
   12366 static int
   12367 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12368 {
   12369 	int32_t  rv = 0;
   12370 	uint32_t flash_bank = 0;
   12371 	uint32_t act_offset = 0;
   12372 	uint32_t bank_offset = 0;
   12373 	uint16_t word = 0;
   12374 	uint16_t i = 0;
   12375 
   12376 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12377 		device_xname(sc->sc_dev), __func__));
   12378 
   12379 	if (sc->nvm.acquire(sc) != 0)
   12380 		return -1;
   12381 
   12382 	/*
   12383 	 * We need to know which is the valid flash bank.  In the event
   12384 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12385 	 * managing flash_bank.  So it cannot be trusted and needs
   12386 	 * to be updated with each read.
   12387 	 */
   12388 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12389 	if (rv) {
   12390 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12391 			device_xname(sc->sc_dev)));
   12392 		flash_bank = 0;
   12393 	}
   12394 
   12395 	/*
   12396 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12397 	 * size
   12398 	 */
   12399 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12400 
   12401 	for (i = 0; i < words; i++) {
   12402 		/* The NVM part needs a byte offset, hence * 2 */
   12403 		act_offset = bank_offset + ((offset + i) * 2);
   12404 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12405 		if (rv) {
   12406 			aprint_error_dev(sc->sc_dev,
   12407 			    "%s: failed to read NVM\n", __func__);
   12408 			break;
   12409 		}
   12410 		data[i] = word;
   12411 	}
   12412 
   12413 	sc->nvm.release(sc);
   12414 	return rv;
   12415 }
   12416 
   12417 /******************************************************************************
   12418  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12419  * register.
   12420  *
   12421  * sc - Struct containing variables accessed by shared code
   12422  * offset - offset of word in the EEPROM to read
    12423  * words - number of words to read
    12424  * data - words read from the EEPROM
   12425  *****************************************************************************/
   12426 static int
   12427 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12428 {
   12429 	int32_t  rv = 0;
   12430 	uint32_t flash_bank = 0;
   12431 	uint32_t act_offset = 0;
   12432 	uint32_t bank_offset = 0;
   12433 	uint32_t dword = 0;
   12434 	uint16_t i = 0;
   12435 
   12436 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12437 		device_xname(sc->sc_dev), __func__));
   12438 
   12439 	if (sc->nvm.acquire(sc) != 0)
   12440 		return -1;
   12441 
   12442 	/*
   12443 	 * We need to know which is the valid flash bank.  In the event
   12444 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12445 	 * managing flash_bank.  So it cannot be trusted and needs
   12446 	 * to be updated with each read.
   12447 	 */
   12448 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12449 	if (rv) {
   12450 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12451 			device_xname(sc->sc_dev)));
   12452 		flash_bank = 0;
   12453 	}
   12454 
   12455 	/*
   12456 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12457 	 * size
   12458 	 */
   12459 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12460 
   12461 	for (i = 0; i < words; i++) {
   12462 		/* The NVM part needs a byte offset, hence * 2 */
   12463 		act_offset = bank_offset + ((offset + i) * 2);
   12464 		/* but we must read dword aligned, so mask ... */
   12465 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12466 		if (rv) {
   12467 			aprint_error_dev(sc->sc_dev,
   12468 			    "%s: failed to read NVM\n", __func__);
   12469 			break;
   12470 		}
   12471 		/* ... and pick out low or high word */
   12472 		if ((act_offset & 0x2) == 0)
   12473 			data[i] = (uint16_t)(dword & 0xFFFF);
   12474 		else
   12475 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12476 	}
   12477 
   12478 	sc->nvm.release(sc);
   12479 	return rv;
   12480 }
   12481 
   12482 /* iNVM */
   12483 
   12484 static int
   12485 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12486 {
    12487 	int32_t rv = -1;	/* Assume not found until a record matches. */
   12488 	uint32_t invm_dword;
   12489 	uint16_t i;
   12490 	uint8_t record_type, word_address;
   12491 
   12492 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12493 		device_xname(sc->sc_dev), __func__));
   12494 
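          	/*
          	 * The iNVM is a flat array of dwords. Walk the records,
          	 * skipping CSR-autoload and RSA-key structures, until the
          	 * word-autoload record for the requested address is found.
          	 */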
   12495 	for (i = 0; i < INVM_SIZE; i++) {
   12496 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12497 		/* Get record type */
   12498 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12499 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12500 			break;
   12501 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12502 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12503 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12504 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12505 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12506 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12507 			if (word_address == address) {
   12508 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12509 				rv = 0;
   12510 				break;
   12511 			}
   12512 		}
   12513 	}
   12514 
   12515 	return rv;
   12516 }
   12517 
   12518 static int
   12519 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12520 {
   12521 	int rv = 0;
   12522 	int i;
   12523 
   12524 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12525 		device_xname(sc->sc_dev), __func__));
   12526 
   12527 	if (sc->nvm.acquire(sc) != 0)
   12528 		return -1;
   12529 
   12530 	for (i = 0; i < words; i++) {
   12531 		switch (offset + i) {
   12532 		case NVM_OFF_MACADDR:
   12533 		case NVM_OFF_MACADDR1:
   12534 		case NVM_OFF_MACADDR2:
   12535 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12536 			if (rv != 0) {
   12537 				data[i] = 0xffff;
   12538 				rv = -1;
   12539 			}
   12540 			break;
   12541 		case NVM_OFF_CFG2:
   12542 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12543 			if (rv != 0) {
   12544 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12545 				rv = 0;
   12546 			}
   12547 			break;
   12548 		case NVM_OFF_CFG4:
   12549 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12550 			if (rv != 0) {
   12551 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12552 				rv = 0;
   12553 			}
   12554 			break;
   12555 		case NVM_OFF_LED_1_CFG:
   12556 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12557 			if (rv != 0) {
   12558 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12559 				rv = 0;
   12560 			}
   12561 			break;
   12562 		case NVM_OFF_LED_0_2_CFG:
   12563 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12564 			if (rv != 0) {
   12565 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12566 				rv = 0;
   12567 			}
   12568 			break;
   12569 		case NVM_OFF_ID_LED_SETTINGS:
   12570 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12571 			if (rv != 0) {
   12572 				*data = ID_LED_RESERVED_FFFF;
   12573 				rv = 0;
   12574 			}
   12575 			break;
   12576 		default:
   12577 			DPRINTF(WM_DEBUG_NVM,
   12578 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12579 			*data = NVM_RESERVED_WORD;
   12580 			break;
   12581 		}
   12582 	}
   12583 
   12584 	sc->nvm.release(sc);
   12585 	return rv;
   12586 }
   12587 
    12588 /* Locking, NVM type detection, checksum validation, version, and read */
   12589 
   12590 static int
   12591 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12592 {
   12593 	uint32_t eecd = 0;
   12594 
   12595 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12596 	    || sc->sc_type == WM_T_82583) {
   12597 		eecd = CSR_READ(sc, WMREG_EECD);
   12598 
   12599 		/* Isolate bits 15 & 16 */
   12600 		eecd = ((eecd >> 15) & 0x03);
   12601 
   12602 		/* If both bits are set, device is Flash type */
   12603 		if (eecd == 0x03)
   12604 			return 0;
   12605 	}
   12606 	return 1;
   12607 }
   12608 
   12609 static int
   12610 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12611 {
   12612 	uint32_t eec;
   12613 
   12614 	eec = CSR_READ(sc, WMREG_EEC);
   12615 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12616 		return 1;
   12617 
   12618 	return 0;
   12619 }
   12620 
   12621 /*
   12622  * wm_nvm_validate_checksum
   12623  *
   12624  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12625  */
   12626 static int
   12627 wm_nvm_validate_checksum(struct wm_softc *sc)
   12628 {
   12629 	uint16_t checksum;
   12630 	uint16_t eeprom_data;
   12631 #ifdef WM_DEBUG
   12632 	uint16_t csum_wordaddr, valid_checksum;
   12633 #endif
   12634 	int i;
   12635 
   12636 	checksum = 0;
   12637 
   12638 	/* Don't check for I211 */
   12639 	if (sc->sc_type == WM_T_I211)
   12640 		return 0;
   12641 
   12642 #ifdef WM_DEBUG
   12643 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12644 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12645 		csum_wordaddr = NVM_OFF_COMPAT;
   12646 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12647 	} else {
   12648 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12649 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12650 	}
   12651 
   12652 	/* Dump EEPROM image for debug */
   12653 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12654 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12655 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12656 		/* XXX PCH_SPT? */
   12657 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12658 		if ((eeprom_data & valid_checksum) == 0) {
   12659 			DPRINTF(WM_DEBUG_NVM,
   12660 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12661 				device_xname(sc->sc_dev), eeprom_data,
   12662 				    valid_checksum));
   12663 		}
   12664 	}
   12665 
   12666 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12667 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12668 		for (i = 0; i < NVM_SIZE; i++) {
   12669 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12670 				printf("XXXX ");
   12671 			else
   12672 				printf("%04hx ", eeprom_data);
   12673 			if (i % 8 == 7)
   12674 				printf("\n");
   12675 		}
   12676 	}
   12677 
   12678 #endif /* WM_DEBUG */
   12679 
   12680 	for (i = 0; i < NVM_SIZE; i++) {
   12681 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12682 			return 1;
   12683 		checksum += eeprom_data;
   12684 	}
   12685 
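          	/* NB: a mismatch is only logged under WM_DEBUG; we still return success. */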
   12686 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12687 #ifdef WM_DEBUG
   12688 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12689 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12690 #endif
   12691 	}
   12692 
   12693 	return 0;
   12694 }
   12695 
   12696 static void
   12697 wm_nvm_version_invm(struct wm_softc *sc)
   12698 {
   12699 	uint32_t dword;
   12700 
   12701 	/*
   12702 	 * Linux's code to decode version is very strange, so we don't
   12703 	 * obey that algorithm and just use word 61 as the document.
    12704 	 * obey that algorithm and just use word 61 as the document describes.
   12705 	 *
   12706 	 * Example:
   12707 	 *
   12708 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12709 	 */
   12710 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12711 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12712 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12713 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12714 }
   12715 
   12716 static void
   12717 wm_nvm_version(struct wm_softc *sc)
   12718 {
   12719 	uint16_t major, minor, build, patch;
   12720 	uint16_t uid0, uid1;
   12721 	uint16_t nvm_data;
   12722 	uint16_t off;
   12723 	bool check_version = false;
   12724 	bool check_optionrom = false;
   12725 	bool have_build = false;
   12726 	bool have_uid = true;
   12727 
   12728 	/*
   12729 	 * Version format:
   12730 	 *
   12731 	 * XYYZ
   12732 	 * X0YZ
   12733 	 * X0YY
   12734 	 *
   12735 	 * Example:
   12736 	 *
   12737 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12738 	 *	82571	0x50a6	5.10.6?
   12739 	 *	82572	0x506a	5.6.10?
   12740 	 *	82572EI	0x5069	5.6.9?
   12741 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12742 	 *		0x2013	2.1.3?
    12743 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12744 	 */
   12745 
   12746 	/*
   12747 	 * XXX
   12748 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    12749 	 * Qemu's e1000e (82574L) emulation has an SPI ROM of only 64 words.
    12750 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12751 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12752 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12753 		have_uid = false;
   12754 
   12755 	switch (sc->sc_type) {
   12756 	case WM_T_82571:
   12757 	case WM_T_82572:
   12758 	case WM_T_82574:
   12759 	case WM_T_82583:
   12760 		check_version = true;
   12761 		check_optionrom = true;
   12762 		have_build = true;
   12763 		break;
   12764 	case WM_T_82575:
   12765 	case WM_T_82576:
   12766 	case WM_T_82580:
   12767 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12768 			check_version = true;
   12769 		break;
   12770 	case WM_T_I211:
   12771 		wm_nvm_version_invm(sc);
   12772 		have_uid = false;
   12773 		goto printver;
   12774 	case WM_T_I210:
   12775 		if (!wm_nvm_flash_presence_i210(sc)) {
   12776 			wm_nvm_version_invm(sc);
   12777 			have_uid = false;
   12778 			goto printver;
   12779 		}
   12780 		/* FALLTHROUGH */
   12781 	case WM_T_I350:
   12782 	case WM_T_I354:
   12783 		check_version = true;
   12784 		check_optionrom = true;
   12785 		break;
   12786 	default:
   12787 		return;
   12788 	}
   12789 	if (check_version
   12790 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12791 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12792 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12793 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12794 			build = nvm_data & NVM_BUILD_MASK;
   12795 			have_build = true;
   12796 		} else
   12797 			minor = nvm_data & 0x00ff;
   12798 
    12799 		/* The minor number is BCD; convert it to decimal. */
   12800 		minor = (minor / 16) * 10 + (minor % 16);
   12801 		sc->sc_nvm_ver_major = major;
   12802 		sc->sc_nvm_ver_minor = minor;
   12803 
   12804 printver:
   12805 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12806 		    sc->sc_nvm_ver_minor);
   12807 		if (have_build) {
   12808 			sc->sc_nvm_ver_build = build;
   12809 			aprint_verbose(".%d", build);
   12810 		}
   12811 	}
   12812 
    12813 	/* Assume the Option ROM area is above NVM_SIZE */
   12814 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12815 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12816 		/* Option ROM Version */
   12817 		if ((off != 0x0000) && (off != 0xffff)) {
   12818 			int rv;
   12819 
   12820 			off += NVM_COMBO_VER_OFF;
   12821 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12822 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12823 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12824 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12825 				/* 16bits */
   12826 				major = uid0 >> 8;
   12827 				build = (uid0 << 8) | (uid1 >> 8);
   12828 				patch = uid1 & 0x00ff;
   12829 				aprint_verbose(", option ROM Version %d.%d.%d",
   12830 				    major, build, patch);
   12831 			}
   12832 		}
   12833 	}
   12834 
   12835 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12836 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12837 }
   12838 
   12839 /*
   12840  * wm_nvm_read:
   12841  *
   12842  *	Read data from the serial EEPROM.
   12843  */
   12844 static int
   12845 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12846 {
   12847 	int rv;
   12848 
   12849 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12850 		device_xname(sc->sc_dev), __func__));
   12851 
   12852 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12853 		return -1;
   12854 
   12855 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12856 
   12857 	return rv;
   12858 }
   12859 
   12860 /*
   12861  * Hardware semaphores.
    12862  * Very complex...
   12863  */
   12864 
   12865 static int
   12866 wm_get_null(struct wm_softc *sc)
   12867 {
   12868 
   12869 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12870 		device_xname(sc->sc_dev), __func__));
   12871 	return 0;
   12872 }
   12873 
   12874 static void
   12875 wm_put_null(struct wm_softc *sc)
   12876 {
   12877 
   12878 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12879 		device_xname(sc->sc_dev), __func__));
   12880 	return;
   12881 }
   12882 
   12883 static int
   12884 wm_get_eecd(struct wm_softc *sc)
   12885 {
   12886 	uint32_t reg;
   12887 	int x;
   12888 
   12889 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12890 		device_xname(sc->sc_dev), __func__));
   12891 
   12892 	reg = CSR_READ(sc, WMREG_EECD);
   12893 
   12894 	/* Request EEPROM access. */
   12895 	reg |= EECD_EE_REQ;
   12896 	CSR_WRITE(sc, WMREG_EECD, reg);
   12897 
   12898 	/* ..and wait for it to be granted. */
   12899 	for (x = 0; x < 1000; x++) {
   12900 		reg = CSR_READ(sc, WMREG_EECD);
   12901 		if (reg & EECD_EE_GNT)
   12902 			break;
   12903 		delay(5);
   12904 	}
   12905 	if ((reg & EECD_EE_GNT) == 0) {
   12906 		aprint_error_dev(sc->sc_dev,
   12907 		    "could not acquire EEPROM GNT\n");
   12908 		reg &= ~EECD_EE_REQ;
   12909 		CSR_WRITE(sc, WMREG_EECD, reg);
   12910 		return -1;
   12911 	}
   12912 
   12913 	return 0;
   12914 }
   12915 
   12916 static void
   12917 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12918 {
   12919 
   12920 	*eecd |= EECD_SK;
   12921 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12922 	CSR_WRITE_FLUSH(sc);
   12923 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12924 		delay(1);
   12925 	else
   12926 		delay(50);
   12927 }
   12928 
   12929 static void
   12930 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12931 {
   12932 
   12933 	*eecd &= ~EECD_SK;
   12934 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12935 	CSR_WRITE_FLUSH(sc);
   12936 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12937 		delay(1);
   12938 	else
   12939 		delay(50);
   12940 }
   12941 
   12942 static void
   12943 wm_put_eecd(struct wm_softc *sc)
   12944 {
   12945 	uint32_t reg;
   12946 
   12947 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12948 		device_xname(sc->sc_dev), __func__));
   12949 
   12950 	/* Stop nvm */
   12951 	reg = CSR_READ(sc, WMREG_EECD);
   12952 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12953 		/* Pull CS high */
   12954 		reg |= EECD_CS;
   12955 		wm_nvm_eec_clock_lower(sc, &reg);
   12956 	} else {
   12957 		/* CS on Microwire is active-high */
   12958 		reg &= ~(EECD_CS | EECD_DI);
   12959 		CSR_WRITE(sc, WMREG_EECD, reg);
   12960 		wm_nvm_eec_clock_raise(sc, &reg);
   12961 		wm_nvm_eec_clock_lower(sc, &reg);
   12962 	}
   12963 
   12964 	reg = CSR_READ(sc, WMREG_EECD);
   12965 	reg &= ~EECD_EE_REQ;
   12966 	CSR_WRITE(sc, WMREG_EECD, reg);
   12967 
   12968 	return;
   12969 }
   12970 
   12971 /*
   12972  * Get hardware semaphore.
   12973  * Same as e1000_get_hw_semaphore_generic()
   12974  */
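          /*
           * Acquisition is two-stage: first wait for the SMBI bit to clear,
           * then claim SWESMBI by writing it and reading it back; if the bit
           * reads back as set, we own the semaphore (see the loops below).
           */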
   12975 static int
   12976 wm_get_swsm_semaphore(struct wm_softc *sc)
   12977 {
   12978 	int32_t timeout;
   12979 	uint32_t swsm;
   12980 
   12981 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12982 		device_xname(sc->sc_dev), __func__));
   12983 	KASSERT(sc->sc_nvm_wordsize > 0);
   12984 
   12985 retry:
   12986 	/* Get the SW semaphore. */
   12987 	timeout = sc->sc_nvm_wordsize + 1;
   12988 	while (timeout) {
   12989 		swsm = CSR_READ(sc, WMREG_SWSM);
   12990 
   12991 		if ((swsm & SWSM_SMBI) == 0)
   12992 			break;
   12993 
   12994 		delay(50);
   12995 		timeout--;
   12996 	}
   12997 
   12998 	if (timeout == 0) {
   12999 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13000 			/*
   13001 			 * In rare circumstances, the SW semaphore may already
   13002 			 * be held unintentionally. Clear the semaphore once
   13003 			 * before giving up.
   13004 			 */
   13005 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13006 			wm_put_swsm_semaphore(sc);
   13007 			goto retry;
   13008 		}
   13009 		aprint_error_dev(sc->sc_dev,
   13010 		    "could not acquire SWSM SMBI\n");
   13011 		return 1;
   13012 	}
   13013 
   13014 	/* Get the FW semaphore. */
   13015 	timeout = sc->sc_nvm_wordsize + 1;
   13016 	while (timeout) {
   13017 		swsm = CSR_READ(sc, WMREG_SWSM);
   13018 		swsm |= SWSM_SWESMBI;
   13019 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13020 		/* If we managed to set the bit we got the semaphore. */
   13021 		swsm = CSR_READ(sc, WMREG_SWSM);
   13022 		if (swsm & SWSM_SWESMBI)
   13023 			break;
   13024 
   13025 		delay(50);
   13026 		timeout--;
   13027 	}
   13028 
   13029 	if (timeout == 0) {
   13030 		aprint_error_dev(sc->sc_dev,
   13031 		    "could not acquire SWSM SWESMBI\n");
   13032 		/* Release semaphores */
   13033 		wm_put_swsm_semaphore(sc);
   13034 		return 1;
   13035 	}
   13036 	return 0;
   13037 }
   13038 
   13039 /*
   13040  * Put hardware semaphore.
   13041  * Same as e1000_put_hw_semaphore_generic()
   13042  */
   13043 static void
   13044 wm_put_swsm_semaphore(struct wm_softc *sc)
   13045 {
   13046 	uint32_t swsm;
   13047 
   13048 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13049 		device_xname(sc->sc_dev), __func__));
   13050 
   13051 	swsm = CSR_READ(sc, WMREG_SWSM);
   13052 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13053 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13054 }
   13055 
   13056 /*
   13057  * Get SW/FW semaphore.
   13058  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13059  */
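          /*
           * "mask" is a SWFW_* resource bit (e.g. SWFW_EEP_SM as used by
           * wm_get_nvm_80003()).  It is checked against both the software
           * (SWFW_SOFT_SHIFT) and firmware (SWFW_FIRM_SHIFT) fields of
           * SW_FW_SYNC; on success only the software bit is set.
           */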
   13060 static int
   13061 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13062 {
   13063 	uint32_t swfw_sync;
   13064 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13065 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13066 	int timeout;
   13067 
   13068 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13069 		device_xname(sc->sc_dev), __func__));
   13070 
   13071 	if (sc->sc_type == WM_T_80003)
   13072 		timeout = 50;
   13073 	else
   13074 		timeout = 200;
   13075 
   13076 	while (timeout) {
   13077 		if (wm_get_swsm_semaphore(sc)) {
   13078 			aprint_error_dev(sc->sc_dev,
   13079 			    "%s: failed to get semaphore\n",
   13080 			    __func__);
   13081 			return 1;
   13082 		}
   13083 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13084 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13085 			swfw_sync |= swmask;
   13086 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13087 			wm_put_swsm_semaphore(sc);
   13088 			return 0;
   13089 		}
   13090 		wm_put_swsm_semaphore(sc);
   13091 		delay(5000);
   13092 		timeout--;
   13093 	}
   13094 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13095 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13096 	return 1;
   13097 }
   13098 
   13099 static void
   13100 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13101 {
   13102 	uint32_t swfw_sync;
   13103 
   13104 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13105 		device_xname(sc->sc_dev), __func__));
   13106 
   13107 	while (wm_get_swsm_semaphore(sc) != 0)
   13108 		continue;
   13109 
   13110 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13111 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13112 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13113 
   13114 	wm_put_swsm_semaphore(sc);
   13115 }
   13116 
   13117 static int
   13118 wm_get_nvm_80003(struct wm_softc *sc)
   13119 {
   13120 	int rv;
   13121 
   13122 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13123 		device_xname(sc->sc_dev), __func__));
   13124 
   13125 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13126 		aprint_error_dev(sc->sc_dev,
   13127 		    "%s: failed to get semaphore(SWFW)\n",
   13128 		    __func__);
   13129 		return rv;
   13130 	}
   13131 
   13132 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13133 	    && (rv = wm_get_eecd(sc)) != 0) {
   13134 		aprint_error_dev(sc->sc_dev,
   13135 		    "%s: failed to get semaphore(EECD)\n",
   13136 		    __func__);
   13137 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13138 		return rv;
   13139 	}
   13140 
   13141 	return 0;
   13142 }
   13143 
   13144 static void
   13145 wm_put_nvm_80003(struct wm_softc *sc)
   13146 {
   13147 
   13148 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13149 		device_xname(sc->sc_dev), __func__));
   13150 
   13151 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13152 		wm_put_eecd(sc);
   13153 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13154 }
   13155 
   13156 static int
   13157 wm_get_nvm_82571(struct wm_softc *sc)
   13158 {
   13159 	int rv;
   13160 
   13161 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13162 		device_xname(sc->sc_dev), __func__));
   13163 
   13164 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13165 		return rv;
   13166 
   13167 	switch (sc->sc_type) {
   13168 	case WM_T_82573:
   13169 		break;
   13170 	default:
   13171 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13172 			rv = wm_get_eecd(sc);
   13173 		break;
   13174 	}
   13175 
   13176 	if (rv != 0) {
   13177 		aprint_error_dev(sc->sc_dev,
   13178 		    "%s: failed to get semaphore\n",
   13179 		    __func__);
   13180 		wm_put_swsm_semaphore(sc);
   13181 	}
   13182 
   13183 	return rv;
   13184 }
   13185 
   13186 static void
   13187 wm_put_nvm_82571(struct wm_softc *sc)
   13188 {
   13189 
   13190 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13191 		device_xname(sc->sc_dev), __func__));
   13192 
   13193 	switch (sc->sc_type) {
   13194 	case WM_T_82573:
   13195 		break;
   13196 	default:
   13197 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13198 			wm_put_eecd(sc);
   13199 		break;
   13200 	}
   13201 
   13202 	wm_put_swsm_semaphore(sc);
   13203 }
   13204 
   13205 static int
   13206 wm_get_phy_82575(struct wm_softc *sc)
   13207 {
   13208 
   13209 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13210 		device_xname(sc->sc_dev), __func__));
   13211 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13212 }
   13213 
   13214 static void
   13215 wm_put_phy_82575(struct wm_softc *sc)
   13216 {
   13217 
   13218 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13219 		device_xname(sc->sc_dev), __func__));
   13220 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13221 }
   13222 
   13223 static int
   13224 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13225 {
   13226 	uint32_t ext_ctrl;
    13227 	int timeout;
   13228 
   13229 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13230 		device_xname(sc->sc_dev), __func__));
   13231 
   13232 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13233 	for (timeout = 0; timeout < 200; timeout++) {
   13234 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13235 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13236 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13237 
   13238 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13239 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13240 			return 0;
   13241 		delay(5000);
   13242 	}
   13243 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13244 	    device_xname(sc->sc_dev), ext_ctrl);
   13245 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13246 	return 1;
   13247 }
   13248 
   13249 static void
   13250 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13251 {
   13252 	uint32_t ext_ctrl;
   13253 
   13254 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13255 		device_xname(sc->sc_dev), __func__));
   13256 
   13257 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13258 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13259 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13260 
   13261 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13262 }
   13263 
   13264 static int
   13265 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13266 {
   13267 	uint32_t ext_ctrl;
   13268 	int timeout;
   13269 
   13270 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13271 		device_xname(sc->sc_dev), __func__));
   13272 	mutex_enter(sc->sc_ich_phymtx);
   13273 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13274 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13275 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13276 			break;
   13277 		delay(1000);
   13278 	}
   13279 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13280 		printf("%s: SW has already locked the resource\n",
   13281 		    device_xname(sc->sc_dev));
   13282 		goto out;
   13283 	}
   13284 
   13285 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13286 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13287 	for (timeout = 0; timeout < 1000; timeout++) {
   13288 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13289 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13290 			break;
   13291 		delay(1000);
   13292 	}
   13293 	if (timeout >= 1000) {
   13294 		printf("%s: failed to acquire semaphore\n",
   13295 		    device_xname(sc->sc_dev));
   13296 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13297 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13298 		goto out;
   13299 	}
   13300 	return 0;
   13301 
   13302 out:
   13303 	mutex_exit(sc->sc_ich_phymtx);
   13304 	return 1;
   13305 }
   13306 
   13307 static void
   13308 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13309 {
   13310 	uint32_t ext_ctrl;
   13311 
   13312 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13313 		device_xname(sc->sc_dev), __func__));
   13314 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13315 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13316 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13317 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13318 	} else {
   13319 		printf("%s: Semaphore unexpectedly released\n",
   13320 		    device_xname(sc->sc_dev));
   13321 	}
   13322 
   13323 	mutex_exit(sc->sc_ich_phymtx);
   13324 }
   13325 
   13326 static int
   13327 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13328 {
   13329 
   13330 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13331 		device_xname(sc->sc_dev), __func__));
   13332 	mutex_enter(sc->sc_ich_nvmmtx);
   13333 
   13334 	return 0;
   13335 }
   13336 
   13337 static void
   13338 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13339 {
   13340 
   13341 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13342 		device_xname(sc->sc_dev), __func__));
   13343 	mutex_exit(sc->sc_ich_nvmmtx);
   13344 }
   13345 
   13346 static int
   13347 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13348 {
   13349 	int i = 0;
   13350 	uint32_t reg;
   13351 
   13352 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13353 		device_xname(sc->sc_dev), __func__));
   13354 
   13355 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13356 	do {
   13357 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13358 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13359 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13360 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13361 			break;
   13362 		delay(2*1000);
   13363 		i++;
   13364 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13365 
   13366 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13367 		wm_put_hw_semaphore_82573(sc);
   13368 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13369 		    device_xname(sc->sc_dev));
   13370 		return -1;
   13371 	}
   13372 
   13373 	return 0;
   13374 }
   13375 
   13376 static void
   13377 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13378 {
   13379 	uint32_t reg;
   13380 
   13381 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13382 		device_xname(sc->sc_dev), __func__));
   13383 
   13384 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13385 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13386 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13387 }
   13388 
   13389 /*
   13390  * Management mode and power management related subroutines.
   13391  * BMC, AMT, suspend/resume and EEE.
   13392  */
   13393 
   13394 #ifdef WM_WOL
   13395 static int
   13396 wm_check_mng_mode(struct wm_softc *sc)
   13397 {
   13398 	int rv;
   13399 
   13400 	switch (sc->sc_type) {
   13401 	case WM_T_ICH8:
   13402 	case WM_T_ICH9:
   13403 	case WM_T_ICH10:
   13404 	case WM_T_PCH:
   13405 	case WM_T_PCH2:
   13406 	case WM_T_PCH_LPT:
   13407 	case WM_T_PCH_SPT:
   13408 	case WM_T_PCH_CNP:
   13409 		rv = wm_check_mng_mode_ich8lan(sc);
   13410 		break;
   13411 	case WM_T_82574:
   13412 	case WM_T_82583:
   13413 		rv = wm_check_mng_mode_82574(sc);
   13414 		break;
   13415 	case WM_T_82571:
   13416 	case WM_T_82572:
   13417 	case WM_T_82573:
   13418 	case WM_T_80003:
   13419 		rv = wm_check_mng_mode_generic(sc);
   13420 		break;
   13421 	default:
    13422 		/* nothing to do */
   13423 		rv = 0;
   13424 		break;
   13425 	}
   13426 
   13427 	return rv;
   13428 }
   13429 
   13430 static int
   13431 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13432 {
   13433 	uint32_t fwsm;
   13434 
   13435 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13436 
   13437 	if (((fwsm & FWSM_FW_VALID) != 0)
   13438 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13439 		return 1;
   13440 
   13441 	return 0;
   13442 }
   13443 
   13444 static int
   13445 wm_check_mng_mode_82574(struct wm_softc *sc)
   13446 {
   13447 	uint16_t data;
   13448 
   13449 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13450 
   13451 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13452 		return 1;
   13453 
   13454 	return 0;
   13455 }
   13456 
   13457 static int
   13458 wm_check_mng_mode_generic(struct wm_softc *sc)
   13459 {
   13460 	uint32_t fwsm;
   13461 
   13462 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13463 
   13464 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13465 		return 1;
   13466 
   13467 	return 0;
   13468 }
   13469 #endif /* WM_WOL */
   13470 
   13471 static int
   13472 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13473 {
   13474 	uint32_t manc, fwsm, factps;
   13475 
   13476 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13477 		return 0;
   13478 
   13479 	manc = CSR_READ(sc, WMREG_MANC);
   13480 
   13481 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13482 		device_xname(sc->sc_dev), manc));
   13483 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13484 		return 0;
   13485 
   13486 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13487 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13488 		factps = CSR_READ(sc, WMREG_FACTPS);
   13489 		if (((factps & FACTPS_MNGCG) == 0)
   13490 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13491 			return 1;
   13492 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13493 		uint16_t data;
   13494 
   13495 		factps = CSR_READ(sc, WMREG_FACTPS);
   13496 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13497 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13498 			device_xname(sc->sc_dev), factps, data));
   13499 		if (((factps & FACTPS_MNGCG) == 0)
   13500 		    && ((data & NVM_CFG2_MNGM_MASK)
   13501 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13502 			return 1;
   13503 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13504 	    && ((manc & MANC_ASF_EN) == 0))
   13505 		return 1;
   13506 
   13507 	return 0;
   13508 }
   13509 
   13510 static bool
   13511 wm_phy_resetisblocked(struct wm_softc *sc)
   13512 {
   13513 	bool blocked = false;
   13514 	uint32_t reg;
   13515 	int i = 0;
   13516 
   13517 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13518 		device_xname(sc->sc_dev), __func__));
   13519 
   13520 	switch (sc->sc_type) {
   13521 	case WM_T_ICH8:
   13522 	case WM_T_ICH9:
   13523 	case WM_T_ICH10:
   13524 	case WM_T_PCH:
   13525 	case WM_T_PCH2:
   13526 	case WM_T_PCH_LPT:
   13527 	case WM_T_PCH_SPT:
   13528 	case WM_T_PCH_CNP:
   13529 		do {
   13530 			reg = CSR_READ(sc, WMREG_FWSM);
   13531 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13532 				blocked = true;
   13533 				delay(10*1000);
   13534 				continue;
   13535 			}
   13536 			blocked = false;
   13537 		} while (blocked && (i++ < 30));
    13538 		return blocked;
   13540 	case WM_T_82571:
   13541 	case WM_T_82572:
   13542 	case WM_T_82573:
   13543 	case WM_T_82574:
   13544 	case WM_T_82583:
   13545 	case WM_T_80003:
   13546 		reg = CSR_READ(sc, WMREG_MANC);
    13547 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   13552 	default:
   13553 		/* no problem */
   13554 		break;
   13555 	}
   13556 
   13557 	return false;
   13558 }
   13559 
   13560 static void
   13561 wm_get_hw_control(struct wm_softc *sc)
   13562 {
   13563 	uint32_t reg;
   13564 
   13565 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13566 		device_xname(sc->sc_dev), __func__));
   13567 
   13568 	if (sc->sc_type == WM_T_82573) {
   13569 		reg = CSR_READ(sc, WMREG_SWSM);
   13570 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13571 	} else if (sc->sc_type >= WM_T_82571) {
   13572 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13573 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13574 	}
   13575 }
   13576 
   13577 static void
   13578 wm_release_hw_control(struct wm_softc *sc)
   13579 {
   13580 	uint32_t reg;
   13581 
   13582 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13583 		device_xname(sc->sc_dev), __func__));
   13584 
   13585 	if (sc->sc_type == WM_T_82573) {
   13586 		reg = CSR_READ(sc, WMREG_SWSM);
   13587 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13588 	} else if (sc->sc_type >= WM_T_82571) {
   13589 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13590 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13591 	}
   13592 }
   13593 
   13594 static void
   13595 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13596 {
   13597 	uint32_t reg;
   13598 
   13599 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13600 		device_xname(sc->sc_dev), __func__));
   13601 
   13602 	if (sc->sc_type < WM_T_PCH2)
   13603 		return;
   13604 
   13605 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13606 
   13607 	if (gate)
   13608 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13609 	else
   13610 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13611 
   13612 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13613 }
   13614 
   13615 static void
   13616 wm_smbustopci(struct wm_softc *sc)
   13617 {
   13618 	uint32_t fwsm, reg;
   13619 	int rv = 0;
   13620 
   13621 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13622 		device_xname(sc->sc_dev), __func__));
   13623 
   13624 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13625 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13626 
   13627 	/* Disable ULP */
   13628 	wm_ulp_disable(sc);
   13629 
   13630 	/* Acquire PHY semaphore */
   13631 	sc->phy.acquire(sc);
   13632 
   13633 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13634 	switch (sc->sc_type) {
   13635 	case WM_T_PCH_LPT:
   13636 	case WM_T_PCH_SPT:
   13637 	case WM_T_PCH_CNP:
   13638 		if (wm_phy_is_accessible_pchlan(sc))
   13639 			break;
   13640 
   13641 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13642 		reg |= CTRL_EXT_FORCE_SMBUS;
   13643 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13644 #if 0
   13645 		/* XXX Isn't this required??? */
   13646 		CSR_WRITE_FLUSH(sc);
   13647 #endif
   13648 		delay(50 * 1000);
   13649 		/* FALLTHROUGH */
   13650 	case WM_T_PCH2:
   13651 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13652 			break;
   13653 		/* FALLTHROUGH */
   13654 	case WM_T_PCH:
   13655 		if (sc->sc_type == WM_T_PCH)
   13656 			if ((fwsm & FWSM_FW_VALID) != 0)
   13657 				break;
   13658 
   13659 		if (wm_phy_resetisblocked(sc) == true) {
   13660 			printf("XXX reset is blocked(3)\n");
   13661 			break;
   13662 		}
   13663 
   13664 		wm_toggle_lanphypc_pch_lpt(sc);
   13665 
   13666 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13667 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13668 				break;
   13669 
   13670 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13671 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13672 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13673 
   13674 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13675 				break;
   13676 			rv = -1;
   13677 		}
   13678 		break;
   13679 	default:
   13680 		break;
   13681 	}
   13682 
   13683 	/* Release semaphore */
   13684 	sc->phy.release(sc);
   13685 
   13686 	if (rv == 0) {
   13687 		if (wm_phy_resetisblocked(sc)) {
   13688 			printf("XXX reset is blocked(4)\n");
   13689 			goto out;
   13690 		}
   13691 		wm_reset_phy(sc);
   13692 		if (wm_phy_resetisblocked(sc))
   13693 			printf("XXX reset is blocked(4)\n");
   13694 	}
   13695 
   13696 out:
   13697 	/*
   13698 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13699 	 */
   13700 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13701 		delay(10*1000);
   13702 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13703 	}
   13704 }
   13705 
   13706 static void
   13707 wm_init_manageability(struct wm_softc *sc)
   13708 {
   13709 
   13710 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13711 		device_xname(sc->sc_dev), __func__));
   13712 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13713 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13714 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13715 
   13716 		/* Disable hardware interception of ARP */
   13717 		manc &= ~MANC_ARP_EN;
   13718 
   13719 		/* Enable receiving management packets to the host */
   13720 		if (sc->sc_type >= WM_T_82571) {
   13721 			manc |= MANC_EN_MNG2HOST;
   13722 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13723 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13724 		}
   13725 
   13726 		CSR_WRITE(sc, WMREG_MANC, manc);
   13727 	}
   13728 }
   13729 
   13730 static void
   13731 wm_release_manageability(struct wm_softc *sc)
   13732 {
   13733 
   13734 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13735 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13736 
   13737 		manc |= MANC_ARP_EN;
   13738 		if (sc->sc_type >= WM_T_82571)
   13739 			manc &= ~MANC_EN_MNG2HOST;
   13740 
   13741 		CSR_WRITE(sc, WMREG_MANC, manc);
   13742 	}
   13743 }
   13744 
   13745 static void
   13746 wm_get_wakeup(struct wm_softc *sc)
   13747 {
   13748 
   13749 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13750 	switch (sc->sc_type) {
   13751 	case WM_T_82573:
   13752 	case WM_T_82583:
   13753 		sc->sc_flags |= WM_F_HAS_AMT;
   13754 		/* FALLTHROUGH */
   13755 	case WM_T_80003:
   13756 	case WM_T_82575:
   13757 	case WM_T_82576:
   13758 	case WM_T_82580:
   13759 	case WM_T_I350:
   13760 	case WM_T_I354:
   13761 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13762 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13763 		/* FALLTHROUGH */
   13764 	case WM_T_82541:
   13765 	case WM_T_82541_2:
   13766 	case WM_T_82547:
   13767 	case WM_T_82547_2:
   13768 	case WM_T_82571:
   13769 	case WM_T_82572:
   13770 	case WM_T_82574:
   13771 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13772 		break;
   13773 	case WM_T_ICH8:
   13774 	case WM_T_ICH9:
   13775 	case WM_T_ICH10:
   13776 	case WM_T_PCH:
   13777 	case WM_T_PCH2:
   13778 	case WM_T_PCH_LPT:
   13779 	case WM_T_PCH_SPT:
   13780 	case WM_T_PCH_CNP:
   13781 		sc->sc_flags |= WM_F_HAS_AMT;
   13782 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13783 		break;
   13784 	default:
   13785 		break;
   13786 	}
   13787 
   13788 	/* 1: HAS_MANAGE */
   13789 	if (wm_enable_mng_pass_thru(sc) != 0)
   13790 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13791 
   13792 	/*
    13793 	 * Note that the WOL flags are set after the resetting of the
    13794 	 * EEPROM stuff.
   13795 	 */
   13796 }
   13797 
   13798 /*
   13799  * Unconfigure Ultra Low Power mode.
    13800  * Only for PCH_LPT and newer, excluding some early devices (see below).
   13801  */
   13802 static void
   13803 wm_ulp_disable(struct wm_softc *sc)
   13804 {
   13805 	uint32_t reg;
   13806 	int i = 0;
   13807 
   13808 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13809 		device_xname(sc->sc_dev), __func__));
   13810 	/* Exclude old devices */
   13811 	if ((sc->sc_type < WM_T_PCH_LPT)
   13812 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13813 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13814 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13815 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13816 		return;
   13817 
   13818 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13819 		/* Request ME un-configure ULP mode in the PHY */
   13820 		reg = CSR_READ(sc, WMREG_H2ME);
   13821 		reg &= ~H2ME_ULP;
   13822 		reg |= H2ME_ENFORCE_SETTINGS;
   13823 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13824 
   13825 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13826 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13827 			if (i++ == 30) {
   13828 				printf("%s timed out\n", __func__);
   13829 				return;
   13830 			}
   13831 			delay(10 * 1000);
   13832 		}
   13833 		reg = CSR_READ(sc, WMREG_H2ME);
   13834 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13835 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13836 
   13837 		return;
   13838 	}
   13839 
   13840 	/* Acquire semaphore */
   13841 	sc->phy.acquire(sc);
   13842 
   13843 	/* Toggle LANPHYPC */
   13844 	wm_toggle_lanphypc_pch_lpt(sc);
   13845 
   13846 	/* Unforce SMBus mode in PHY */
   13847 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13848 	if (reg == 0x0000 || reg == 0xffff) {
   13849 		uint32_t reg2;
   13850 
   13851 		printf("%s: Force SMBus first.\n", __func__);
   13852 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13853 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13854 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13855 		delay(50 * 1000);
   13856 
   13857 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13858 	}
   13859 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13860 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13861 
   13862 	/* Unforce SMBus mode in MAC */
   13863 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13864 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13865 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13866 
   13867 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13868 	reg |= HV_PM_CTRL_K1_ENA;
   13869 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13870 
   13871 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13872 	reg &= ~(I218_ULP_CONFIG1_IND
   13873 	    | I218_ULP_CONFIG1_STICKY_ULP
   13874 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13875 	    | I218_ULP_CONFIG1_WOL_HOST
   13876 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13877 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13878 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13879 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13880 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13881 	reg |= I218_ULP_CONFIG1_START;
   13882 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13883 
   13884 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13885 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13886 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13887 
   13888 	/* Release semaphore */
   13889 	sc->phy.release(sc);
   13890 	wm_gmii_reset(sc);
   13891 	delay(50 * 1000);
   13892 }
   13893 
   13894 /* WOL in the newer chipset interfaces (pchlan) */
   13895 static void
   13896 wm_enable_phy_wakeup(struct wm_softc *sc)
   13897 {
   13898 #if 0
   13899 	uint16_t preg;
   13900 
   13901 	/* Copy MAC RARs to PHY RARs */
   13902 
   13903 	/* Copy MAC MTA to PHY MTA */
   13904 
   13905 	/* Configure PHY Rx Control register */
   13906 
   13907 	/* Enable PHY wakeup in MAC register */
   13908 
   13909 	/* Configure and enable PHY wakeup in PHY registers */
   13910 
   13911 	/* Activate PHY wakeup */
   13912 
   13913 	/* XXX */
   13914 #endif
   13915 }
   13916 
   13917 /* Power down workaround on D3 */
   13918 static void
   13919 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13920 {
   13921 	uint32_t reg;
   13922 	int i;
   13923 
   13924 	for (i = 0; i < 2; i++) {
   13925 		/* Disable link */
   13926 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13927 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13928 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13929 
   13930 		/*
   13931 		 * Call gig speed drop workaround on Gig disable before
   13932 		 * accessing any PHY registers
   13933 		 */
   13934 		if (sc->sc_type == WM_T_ICH8)
   13935 			wm_gig_downshift_workaround_ich8lan(sc);
   13936 
   13937 		/* Write VR power-down enable */
   13938 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13939 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13940 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13941 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13942 
   13943 		/* Read it back and test */
   13944 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13945 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13946 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13947 			break;
   13948 
   13949 		/* Issue PHY reset and repeat at most one more time */
   13950 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13951 	}
   13952 }
   13953 
   13954 static void
   13955 wm_enable_wakeup(struct wm_softc *sc)
   13956 {
   13957 	uint32_t reg, pmreg;
   13958 	pcireg_t pmode;
   13959 
   13960 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13961 		device_xname(sc->sc_dev), __func__));
   13962 
   13963 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13964 		&pmreg, NULL) == 0)
   13965 		return;
   13966 
   13967 	/* Advertise the wakeup capability */
   13968 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13969 	    | CTRL_SWDPIN(3));
   13970 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13971 
   13972 	/* ICH workaround */
   13973 	switch (sc->sc_type) {
   13974 	case WM_T_ICH8:
   13975 	case WM_T_ICH9:
   13976 	case WM_T_ICH10:
   13977 	case WM_T_PCH:
   13978 	case WM_T_PCH2:
   13979 	case WM_T_PCH_LPT:
   13980 	case WM_T_PCH_SPT:
   13981 	case WM_T_PCH_CNP:
   13982 		/* Disable gig during WOL */
   13983 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13984 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13985 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13986 		if (sc->sc_type == WM_T_PCH)
   13987 			wm_gmii_reset(sc);
   13988 
   13989 		/* Power down workaround */
   13990 		if (sc->sc_phytype == WMPHY_82577) {
   13991 			struct mii_softc *child;
   13992 
   13993 			/* Assume that the PHY is copper */
   13994 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13995 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13996 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13997 				    (768 << 5) | 25, 0x0444); /* magic num */
   13998 		}
   13999 		break;
   14000 	default:
   14001 		break;
   14002 	}
   14003 
   14004 	/* Keep the laser running on fiber adapters */
   14005 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14006 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14007 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14008 		reg |= CTRL_EXT_SWDPIN(3);
   14009 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14010 	}
   14011 
   14012 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14013 #if 0	/* for the multicast packet */
   14014 	reg |= WUFC_MC;
   14015 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14016 #endif
   14017 
   14018 	if (sc->sc_type >= WM_T_PCH)
   14019 		wm_enable_phy_wakeup(sc);
   14020 	else {
   14021 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14022 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14023 	}
   14024 
   14025 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14026 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14027 		|| (sc->sc_type == WM_T_PCH2))
   14028 		    && (sc->sc_phytype == WMPHY_IGP_3))
   14029 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14030 
   14031 	/* Request PME */
   14032 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14033 #if 0
   14034 	/* Disable WOL */
   14035 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14036 #else
   14037 	/* For WOL */
   14038 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14039 #endif
   14040 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14041 }
   14042 
    14043 /* Disable ASPM L0s and/or L1 as a workaround */
   14044 static void
   14045 wm_disable_aspm(struct wm_softc *sc)
   14046 {
   14047 	pcireg_t reg, mask = 0;
    14048 	const char *str = "";
   14049 
    14050 	/*
    14051 	 * Only for PCIe devices which have the PCIe capability in the PCI
    14052 	 * config space.
    14053 	 */
   14054 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14055 		return;
   14056 
   14057 	switch (sc->sc_type) {
   14058 	case WM_T_82571:
   14059 	case WM_T_82572:
   14060 		/*
   14061 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14062 		 * State Power management L1 State (ASPM L1).
   14063 		 */
   14064 		mask = PCIE_LCSR_ASPM_L1;
   14065 		str = "L1 is";
   14066 		break;
   14067 	case WM_T_82573:
   14068 	case WM_T_82574:
   14069 	case WM_T_82583:
   14070 		/*
   14071 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14072 		 *
    14073 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14074 		 * some chipsets.  The documents for the 82574 and 82583 say
    14075 		 * that disabling L0s only with some specific chipsets is
    14076 		 * sufficient, but we follow what the Intel em driver does.
   14077 		 *
   14078 		 * References:
   14079 		 * Errata 8 of the Specification Update of i82573.
   14080 		 * Errata 20 of the Specification Update of i82574.
   14081 		 * Errata 9 of the Specification Update of i82583.
   14082 		 */
   14083 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14084 		str = "L0s and L1 are";
   14085 		break;
   14086 	default:
   14087 		return;
   14088 	}
   14089 
   14090 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14091 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14092 	reg &= ~mask;
   14093 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14094 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14095 
   14096 	/* Print only in wm_attach() */
   14097 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14098 		aprint_verbose_dev(sc->sc_dev,
    14099 		    "ASPM %s disabled to work around the errata.\n",
    14100 		    str);
   14101 }
   14102 
    14103 /* LPLU (Low Power Link Up) */
   14104 
   14105 static void
   14106 wm_lplu_d0_disable(struct wm_softc *sc)
   14107 {
   14108 	struct mii_data *mii = &sc->sc_mii;
   14109 	uint32_t reg;
   14110 
   14111 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14112 		device_xname(sc->sc_dev), __func__));
   14113 
   14114 	if (sc->sc_phytype == WMPHY_IFE)
   14115 		return;
   14116 
   14117 	switch (sc->sc_type) {
   14118 	case WM_T_82571:
   14119 	case WM_T_82572:
   14120 	case WM_T_82573:
   14121 	case WM_T_82575:
   14122 	case WM_T_82576:
   14123 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14124 		reg &= ~PMR_D0_LPLU;
   14125 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14126 		break;
   14127 	case WM_T_82580:
   14128 	case WM_T_I350:
   14129 	case WM_T_I210:
   14130 	case WM_T_I211:
   14131 		reg = CSR_READ(sc, WMREG_PHPM);
   14132 		reg &= ~PHPM_D0A_LPLU;
   14133 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14134 		break;
   14135 	case WM_T_82574:
   14136 	case WM_T_82583:
   14137 	case WM_T_ICH8:
   14138 	case WM_T_ICH9:
   14139 	case WM_T_ICH10:
   14140 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14141 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14142 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14143 		CSR_WRITE_FLUSH(sc);
   14144 		break;
   14145 	case WM_T_PCH:
   14146 	case WM_T_PCH2:
   14147 	case WM_T_PCH_LPT:
   14148 	case WM_T_PCH_SPT:
   14149 	case WM_T_PCH_CNP:
   14150 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14151 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14152 		if (wm_phy_resetisblocked(sc) == false)
   14153 			reg |= HV_OEM_BITS_ANEGNOW;
   14154 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14155 		break;
   14156 	default:
   14157 		break;
   14158 	}
   14159 }
   14160 
    14161 /* EEE (Energy Efficient Ethernet) */
   14162 
   14163 static void
   14164 wm_set_eee_i350(struct wm_softc *sc)
   14165 {
   14166 	uint32_t ipcnfg, eeer;
   14167 
   14168 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14169 	eeer = CSR_READ(sc, WMREG_EEER);
   14170 
   14171 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14172 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14173 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14174 		    | EEER_LPI_FC);
   14175 	} else {
   14176 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14177 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14178 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14179 		    | EEER_LPI_FC);
   14180 	}
   14181 
   14182 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14183 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14184 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14185 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14186 }
   14187 
   14188 /*
   14189  * Workarounds (mainly PHY related).
    14190  * Basically, PHY workarounds are in the PHY drivers.
   14191  */
   14192 
   14193 /* Work-around for 82566 Kumeran PCS lock loss */
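          /*
           * Outline of the work-around below: at an established 1Gbps link,
           * read the Kumeran diagnostic register twice in a row; if PCS lock
           * loss is still reported after up to ten PHY resets, give up and
           * disable gigabit negotiation.
           */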
   14194 static void
   14195 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14196 {
   14197 	struct mii_data *mii = &sc->sc_mii;
   14198 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14199 	int i;
   14200 	int reg;
   14201 
   14202 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14203 		device_xname(sc->sc_dev), __func__));
   14204 
   14205 	/* If the link is not up, do nothing */
   14206 	if ((status & STATUS_LU) == 0)
   14207 		return;
   14208 
   14209 	/* Nothing to do if the link is other than 1Gbps */
   14210 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14211 		return;
   14212 
   14213 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14214 	for (i = 0; i < 10; i++) {
   14215 		/* read twice */
   14216 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14217 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14218 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14219 			goto out;	/* GOOD! */
   14220 
   14221 		/* Reset the PHY */
   14222 		wm_reset_phy(sc);
   14223 		delay(5*1000);
   14224 	}
   14225 
   14226 	/* Disable GigE link negotiation */
   14227 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14228 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14229 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14230 
   14231 	/*
   14232 	 * Call gig speed drop workaround on Gig disable before accessing
   14233 	 * any PHY registers.
   14234 	 */
   14235 	wm_gig_downshift_workaround_ich8lan(sc);
   14236 
   14237 out:
   14238 	return;
   14239 }
   14240 
   14241 /* WOL from S5 stops working */
   14242 static void
   14243 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14244 {
   14245 	uint16_t kmreg;
   14246 
   14247 	/* Only for igp3 */
   14248 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14249 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14250 			return;
   14251 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14252 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14253 			return;
   14254 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14255 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14256 	}
   14257 }
   14258 
   14259 /*
   14260  * Workaround for pch's PHYs
   14261  * XXX should be moved to new PHY driver?
   14262  */
   14263 static void
   14264 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14265 {
   14266 
   14267 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14268 		device_xname(sc->sc_dev), __func__));
   14269 	KASSERT(sc->sc_type == WM_T_PCH);
   14270 
   14271 	if (sc->sc_phytype == WMPHY_82577)
   14272 		wm_set_mdio_slow_mode_hv(sc);
   14273 
   14274 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14275 
    14276 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   14277 
   14278 	/* 82578 */
   14279 	if (sc->sc_phytype == WMPHY_82578) {
   14280 		struct mii_softc *child;
   14281 
   14282 		/*
   14283 		 * Return registers to default by doing a soft reset then
   14284 		 * writing 0x3140 to the control register
   14285 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14286 		 */
   14287 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14288 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14289 			PHY_RESET(child);
   14290 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14291 			    0x3140);
   14292 		}
   14293 	}
   14294 
   14295 	/* Select page 0 */
   14296 	sc->phy.acquire(sc);
   14297 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14298 	sc->phy.release(sc);
   14299 
   14300 	/*
    14301 	 * Configure the K1 Si workaround during PHY reset, assuming there
    14302 	 * is link, so that K1 is disabled when the link runs at 1Gbps.
   14303 	 */
   14304 	wm_k1_gig_workaround_hv(sc, 1);
   14305 }
   14306 
   14307 static void
   14308 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14309 {
   14310 
   14311 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14312 		device_xname(sc->sc_dev), __func__));
   14313 	KASSERT(sc->sc_type == WM_T_PCH2);
   14314 
   14315 	wm_set_mdio_slow_mode_hv(sc);
   14316 }
   14317 
   14318 static int
   14319 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14320 {
   14321 	int k1_enable = sc->sc_nvm_k1_enabled;
   14322 
   14323 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14324 		device_xname(sc->sc_dev), __func__));
   14325 
   14326 	if (sc->phy.acquire(sc) != 0)
   14327 		return -1;
   14328 
   14329 	if (link) {
   14330 		k1_enable = 0;
   14331 
   14332 		/* Link stall fix for link up */
   14333 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14334 		    0x0100);
   14335 	} else {
   14336 		/* Link stall fix for link down */
   14337 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14338 		    0x4100);
   14339 	}
   14340 
   14341 	wm_configure_k1_ich8lan(sc, k1_enable);
   14342 	sc->phy.release(sc);
   14343 
   14344 	return 0;
   14345 }
   14346 
   14347 static void
   14348 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14349 {
   14350 	uint32_t reg;
   14351 
   14352 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14353 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14354 	    reg | HV_KMRN_MDIO_SLOW);
   14355 }
   14356 
   14357 static void
   14358 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14359 {
   14360 	uint32_t ctrl, ctrl_ext, tmp;
   14361 	uint16_t kmreg;
   14362 	int rv;
   14363 
   14364 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14365 	if (rv != 0)
   14366 		return;
   14367 
   14368 	if (k1_enable)
   14369 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14370 	else
   14371 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14372 
   14373 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14374 	if (rv != 0)
   14375 		return;
   14376 
   14377 	delay(20);
   14378 
   14379 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14380 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14381 
   14382 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14383 	tmp |= CTRL_FRCSPD;
   14384 
   14385 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14386 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14387 	CSR_WRITE_FLUSH(sc);
   14388 	delay(20);
   14389 
   14390 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14391 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14392 	CSR_WRITE_FLUSH(sc);
   14393 	delay(20);
   14394 
   14395 	return;
   14396 }
   14397 
   14398 /* special case - for 82575 - need to do manual init ... */
   14399 static void
   14400 wm_reset_init_script_82575(struct wm_softc *sc)
   14401 {
   14402 	/*
    14403 	 * Remark: this is untested code - we have no board without EEPROM.
    14404 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   14405 	 */
   14406 
   14407 	/* SerDes configuration via SERDESCTRL */
   14408 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14409 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14410 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14411 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14412 
   14413 	/* CCM configuration via CCMCTL register */
   14414 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14415 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14416 
   14417 	/* PCIe lanes configuration */
   14418 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14419 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14420 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14421 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14422 
   14423 	/* PCIe PLL Configuration */
   14424 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14425 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14426 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14427 }
   14428 
   14429 static void
   14430 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14431 {
   14432 	uint32_t reg;
   14433 	uint16_t nvmword;
   14434 	int rv;
   14435 
   14436 	if (sc->sc_type != WM_T_82580)
   14437 		return;
   14438 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14439 		return;
   14440 
   14441 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14442 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14443 	if (rv != 0) {
   14444 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14445 		    __func__);
   14446 		return;
   14447 	}
   14448 
   14449 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14450 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14451 		reg |= MDICNFG_DEST;
   14452 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14453 		reg |= MDICNFG_COM_MDIO;
   14454 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14455 }
   14456 
   14457 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14458 
   14459 static bool
   14460 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14461 {
   14462 	int i;
   14463 	uint32_t reg;
   14464 	uint16_t id1, id2;
   14465 
   14466 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14467 		device_xname(sc->sc_dev), __func__));
   14468 	id1 = id2 = 0xffff;
   14469 	for (i = 0; i < 2; i++) {
   14470 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14471 		if (MII_INVALIDID(id1))
   14472 			continue;
   14473 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14474 		if (MII_INVALIDID(id2))
   14475 			continue;
   14476 		break;
   14477 	}
   14478 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   14479 		goto out;
   14480 	}
   14481 
   14482 	if (sc->sc_type < WM_T_PCH_LPT) {
   14483 		sc->phy.release(sc);
   14484 		wm_set_mdio_slow_mode_hv(sc);
   14485 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14486 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14487 		sc->phy.acquire(sc);
   14488 	}
   14489 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14490 		printf("XXX return with false\n");
   14491 		return false;
   14492 	}
   14493 out:
   14494 	if (sc->sc_type >= WM_T_PCH_LPT) {
   14495 		/* Only unforce SMBus if ME is not active */
   14496 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14497 			/* Unforce SMBus mode in PHY */
   14498 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14499 			    CV_SMB_CTRL);
   14500 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14501 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14502 			    CV_SMB_CTRL, reg);
   14503 
   14504 			/* Unforce SMBus mode in MAC */
   14505 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14506 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14507 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14508 		}
   14509 	}
   14510 	return true;
   14511 }
   14512 
   14513 static void
   14514 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14515 {
   14516 	uint32_t reg;
   14517 	int i;
   14518 
   14519 	/* Set PHY Config Counter to 50msec */
   14520 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14521 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14522 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14523 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14524 
   14525 	/* Toggle LANPHYPC */
   14526 	reg = CSR_READ(sc, WMREG_CTRL);
   14527 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14528 	reg &= ~CTRL_LANPHYPC_VALUE;
   14529 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14530 	CSR_WRITE_FLUSH(sc);
   14531 	delay(1000);
   14532 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14533 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14534 	CSR_WRITE_FLUSH(sc);
   14535 
   14536 	if (sc->sc_type < WM_T_PCH_LPT)
   14537 		delay(50 * 1000);
   14538 	else {
   14539 		i = 20;
   14540 
   14541 		do {
   14542 			delay(5 * 1000);
   14543 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14544 		    && i--);
   14545 
   14546 		delay(30 * 1000);
   14547 	}
   14548 }
   14549 
   14550 static int
   14551 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14552 {
   14553 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14554 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14555 	uint32_t rxa;
   14556 	uint16_t scale = 0, lat_enc = 0;
   14557 	int32_t obff_hwm = 0;
   14558 	int64_t lat_ns, value;
   14559 
   14560 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14561 		device_xname(sc->sc_dev), __func__));
   14562 
   14563 	if (link) {
   14564 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14565 		uint32_t status;
   14566 		uint16_t speed;
   14567 		pcireg_t preg;
   14568 
   14569 		status = CSR_READ(sc, WMREG_STATUS);
   14570 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14571 		case STATUS_SPEED_10:
   14572 			speed = 10;
   14573 			break;
   14574 		case STATUS_SPEED_100:
   14575 			speed = 100;
   14576 			break;
   14577 		case STATUS_SPEED_1000:
   14578 			speed = 1000;
   14579 			break;
   14580 		default:
   14581 			device_printf(sc->sc_dev, "Unknown speed "
   14582 			    "(status = %08x)\n", status);
   14583 			return -1;
   14584 		}
   14585 
   14586 		/* Rx Packet Buffer Allocation size (KB) */
   14587 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14588 
   14589 		/*
   14590 		 * Determine the maximum latency tolerated by the device.
   14591 		 *
   14592 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14593 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14594 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14595 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14596 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14597 		 */
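          		/*
          		 * Worked example with made-up numbers, assuming
          		 * LTRV_VALUE is the 10-bit value field (max 1023):
          		 * for lat_ns = 80000, the loop below yields scale = 2
          		 * and value = howmany(howmany(80000, 32), 32) = 79,
          		 * i.e. an encoded latency of 79 * 2^(5*2) ns ~= 81 us.
          		 */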
   14598 		lat_ns = ((int64_t)rxa * 1024 -
   14599 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14600 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14601 		if (lat_ns < 0)
   14602 			lat_ns = 0;
   14603 		else
   14604 			lat_ns /= speed;
   14605 		value = lat_ns;
   14606 
   14607 		while (value > LTRV_VALUE) {
    14608 			scale++;
   14609 			value = howmany(value, __BIT(5));
   14610 		}
   14611 		if (scale > LTRV_SCALE_MAX) {
   14612 			printf("%s: Invalid LTR latency scale %d\n",
   14613 			    device_xname(sc->sc_dev), scale);
   14614 			return -1;
   14615 		}
   14616 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14617 
   14618 		/* Determine the maximum latency tolerated by the platform */
   14619 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14620 		    WM_PCI_LTR_CAP_LPT);
   14621 		max_snoop = preg & 0xffff;
   14622 		max_nosnoop = preg >> 16;
   14623 
   14624 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14625 
   14626 		if (lat_enc > max_ltr_enc) {
   14627 			lat_enc = max_ltr_enc;
   14628 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14629 			    * PCI_LTR_SCALETONS(
   14630 				    __SHIFTOUT(lat_enc,
   14631 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14632 		}
   14633 
   14634 		if (lat_ns) {
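          		/*
          		 * Convert the (possibly clamped) latency back into the
          		 * approximate amount of Rx buffer (in KB, matching rxa)
          		 * that fills during that latency; the OBFF high water
          		 * mark is set to what remains.
          		 */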
   14635 			lat_ns *= speed * 1000;
   14636 			lat_ns /= 8;
   14637 			lat_ns /= 1000000000;
   14638 			obff_hwm = (int32_t)(rxa - lat_ns);
   14639 		}
   14640 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    14641 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   14642 			    "(rxa = %d, lat_ns = %d)\n",
   14643 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14644 			return -1;
   14645 		}
   14646 	}
   14647 	/* Snoop and No-Snoop latencies the same */
   14648 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14649 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14650 
   14651 	/* Set OBFF high water mark */
   14652 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14653 	reg |= obff_hwm;
   14654 	CSR_WRITE(sc, WMREG_SVT, reg);
   14655 
   14656 	/* Enable OBFF */
   14657 	reg = CSR_READ(sc, WMREG_SVCR);
   14658 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14659 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14660 
   14661 	return 0;
   14662 }
   14663 
   14664 /*
   14665  * I210 Errata 25 and I211 Errata 10
   14666  * Slow System Clock.
   14667  */
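          /*
           * Work-around outline (per the code below): if the internal PHY
           * reports an unconfigured PLL, reset the PHY and bounce the device
           * through D3/D0 with a work-around autoload value written to
           * EEARBC, retrying up to WM_MAX_PLL_TRIES times.  WUC and MDICNFG
           * are saved first and restored afterwards.
           */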
   14668 static void
   14669 wm_pll_workaround_i210(struct wm_softc *sc)
   14670 {
   14671 	uint32_t mdicnfg, wuc;
   14672 	uint32_t reg;
   14673 	pcireg_t pcireg;
   14674 	uint32_t pmreg;
   14675 	uint16_t nvmword, tmp_nvmword;
   14676 	int phyval;
   14677 	bool wa_done = false;
   14678 	int i;
   14679 
   14680 	/* Save WUC and MDICNFG registers */
   14681 	wuc = CSR_READ(sc, WMREG_WUC);
   14682 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14683 
   14684 	reg = mdicnfg & ~MDICNFG_DEST;
   14685 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14686 
   14687 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14688 		nvmword = INVM_DEFAULT_AL;
   14689 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14690 
   14691 	/* Get Power Management cap offset */
   14692 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14693 		&pmreg, NULL) == 0)
   14694 		return;
   14695 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14696 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14697 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14698 
   14699 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14700 			break; /* OK */
   14701 		}
   14702 
   14703 		wa_done = true;
   14704 		/* Directly reset the internal PHY */
   14705 		reg = CSR_READ(sc, WMREG_CTRL);
   14706 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14707 
   14708 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14709 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14710 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14711 
   14712 		CSR_WRITE(sc, WMREG_WUC, 0);
   14713 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14714 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14715 
   14716 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14717 		    pmreg + PCI_PMCSR);
   14718 		pcireg |= PCI_PMCSR_STATE_D3;
   14719 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14720 		    pmreg + PCI_PMCSR, pcireg);
   14721 		delay(1000);
   14722 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14723 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14724 		    pmreg + PCI_PMCSR, pcireg);
   14725 
   14726 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14727 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14728 
   14729 		/* Restore WUC register */
   14730 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14731 	}
   14732 
   14733 	/* Restore MDICNFG setting */
   14734 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14735 	if (wa_done)
   14736 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14737 }
   14738 
   14739 static void
   14740 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14741 {
   14742 	uint32_t reg;
   14743 
   14744 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14745 		device_xname(sc->sc_dev), __func__));
   14746 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14747 
   14748 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14749 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14750 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14751 
   14752 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14753 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14754 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14755 }
   14756