      1 /*	$NetBSD: if_wm.c,v 1.570 2018/04/12 03:25:08 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
      76  *	- Tx multiqueue improvement (refine the queue selection logic)
      77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet)
      79  *	- Virtual Function support
      80  *	- Set the LEDs correctly (based on the EEPROM contents)
      81  *	- Rework how parameters are loaded from the EEPROM
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.570 2018/04/12 03:25:08 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
     160 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    161 #else
    162 #define	DPRINTF(x, y)	/* nothing */
    163 #endif /* WM_DEBUG */
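/*
 * Example use of DPRINTF (a sketch of the call-site convention, not a
 * quote from this file): the format string and its arguments travel as
 * one parenthesized list so the macro can hand them to printf verbatim.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
	    device_xname(sc->sc_dev)));
#endif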
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
     173  * The maximum number of interrupts this device driver can use.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
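/*
 * The defaults above (like WM_WATCHDOG_TIMEOUT and the *_PROCESS_LIMIT
 * defaults below) are compile-time tunables.  Assuming the symbols are
 * declared for opt_if_wm.h (see the _KERNEL_OPT includes above), a
 * kernel configuration file can override them without editing this
 * file, e.g.:
 *
 *	options WM_DISABLE_MSIX=1
 */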
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544.  We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
    202  * situations with jumbo frames.
    203  */
    204 #define	WM_NTXSEGS		256
    205 #define	WM_IFQUEUELEN		256
    206 #define	WM_TXQUEUELEN_MAX	64
    207 #define	WM_TXQUEUELEN_MAX_82547	16
    208 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    209 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    210 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    211 #define	WM_NTXDESC_82542	256
    212 #define	WM_NTXDESC_82544	4096
    213 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    214 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    215 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    216 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    217 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
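/*
 * Because both ring sizes are powers of two, advancing an index is a
 * mask operation, not a modulo.  For example, with WM_NTXDESC_82544
 * (4096) descriptors the mask is 0xfff, so WM_NEXTTX() wraps the last
 * slot back to the start: (4095 + 1) & 0xfff == 0.
 */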
    218 
    219 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    220 
    221 #define	WM_TXINTERQSIZE		256
    222 
    223 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    224 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    225 #endif
    226 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    227 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    228 #endif
    229 
    230 /*
     231  * Receive descriptor list size.  A normal-sized packet uses one Rx
     232  * buffer; a full-sized jumbo packet consumes 5 Rx buffers.  We
     233  * allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
     234  * which gives us room for about 50 jumbo packets.
    235  */
    236 #define	WM_NRXDESC		256
    237 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    238 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    239 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
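/*
 * The arithmetic behind the comment above (a sketch, assuming a
 * ~9018-byte jumbo frame): howmany(9018, MCLBYTES) == 5 clusters per
 * frame, and WM_NRXDESC / 5 == 51, hence "room for about 50".
 */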
    240 
    241 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    242 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    243 #endif
    244 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    245 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    246 #endif
    247 
    248 typedef union txdescs {
    249 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    250 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    251 } txdescs_t;
    252 
    253 typedef union rxdescs {
    254 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    255 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    256 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    257 } rxdescs_t;
    258 
    259 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    260 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
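/*
 * These give the byte offset of descriptor x from the start of the
 * ring; e.g. with the 16-byte legacy Tx descriptor, WM_CDTXOFF(txq, 10)
 * is 160.  WM_CDTXADDR()/WM_CDRXADDR() below add this offset to the
 * ring's DMA base address.
 */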
    261 
    262 /*
    263  * Software state for transmit jobs.
    264  */
    265 struct wm_txsoft {
    266 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    267 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    268 	int txs_firstdesc;		/* first descriptor in packet */
    269 	int txs_lastdesc;		/* last descriptor in packet */
    270 	int txs_ndesc;			/* # of descriptors used */
    271 };
    272 
    273 /*
    274  * Software state for receive buffers.  Each descriptor gets a
    275  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    276  * more than one buffer, we chain them together.
    277  */
    278 struct wm_rxsoft {
    279 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    280 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    281 };
    282 
    283 #define WM_LINKUP_TIMEOUT	50
    284 
    285 static uint16_t swfwphysem[] = {
    286 	SWFW_PHY0_SM,
    287 	SWFW_PHY1_SM,
    288 	SWFW_PHY2_SM,
    289 	SWFW_PHY3_SM
    290 };
    291 
    292 static const uint32_t wm_82580_rxpbs_table[] = {
    293 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    294 };
    295 
    296 struct wm_softc;
    297 
    298 #ifdef WM_EVENT_COUNTERS
    299 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    300 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    301 	struct evcnt qname##_ev_##evname;
    302 
    303 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
     304 	do {								\
    305 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    306 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    307 		    "%s%02d%s", #qname, (qnum), #evname);		\
    308 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    309 		    (evtype), NULL, (xname),				\
    310 		    (q)->qname##_##evname##_evcnt_name);		\
     311 	} while (0)
    312 
    313 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    314 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    315 
    316 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    317 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    318 
    319 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    320 	evcnt_detach(&(q)->qname##_ev_##evname);
    321 #endif /* WM_EVENT_COUNTERS */
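/*
 * For illustration, WM_Q_EVCNT_DEFINE(txq, txdw) expands to roughly:
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the string literal is not token-pasted; it merely sizes the buffer)
 * and WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) then snprintf()s
 * the name "txq00txdw" into that buffer and attaches the counter.
 */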
    322 
    323 struct wm_txqueue {
    324 	kmutex_t *txq_lock;		/* lock for tx operations */
    325 
    326 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    327 
    328 	/* Software state for the transmit descriptors. */
    329 	int txq_num;			/* must be a power of two */
    330 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    331 
    332 	/* TX control data structures. */
    333 	int txq_ndesc;			/* must be a power of two */
    334 	size_t txq_descsize;		/* a tx descriptor size */
    335 	txdescs_t *txq_descs_u;
     336 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    337 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     338 	int txq_desc_rseg;		/* real number of control segments */
    339 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    340 #define	txq_descs	txq_descs_u->sctxu_txdescs
    341 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    342 
    343 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    344 
    345 	int txq_free;			/* number of free Tx descriptors */
    346 	int txq_next;			/* next ready Tx descriptor */
    347 
    348 	int txq_sfree;			/* number of free Tx jobs */
    349 	int txq_snext;			/* next free Tx job */
    350 	int txq_sdirty;			/* dirty Tx jobs */
    351 
    352 	/* These 4 variables are used only on the 82547. */
    353 	int txq_fifo_size;		/* Tx FIFO size */
    354 	int txq_fifo_head;		/* current head of FIFO */
    355 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    356 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    357 
    358 	/*
    359 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     360 	 * CPUs. This pcq mediates between them without blocking.
    361 	 */
    362 	pcq_t *txq_interq;
    363 
    364 	/*
     365 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     366 	 * to manage the Tx H/W queue's busy flag.
    367 	 */
    368 	int txq_flags;			/* flags for H/W queue, see below */
    369 #define	WM_TXQ_NO_SPACE	0x1
    370 
    371 	bool txq_stopping;
    372 
    373 	bool txq_watchdog;
    374 	time_t txq_lastsent;
    375 
    376 	uint32_t txq_packets;		/* for AIM */
    377 	uint32_t txq_bytes;		/* for AIM */
    378 #ifdef WM_EVENT_COUNTERS
    379 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    380 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    381 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    382 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    383 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    384 						/* XXX not used? */
    385 
    386 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     387 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    388 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    389 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    390 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    391 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    392 
    393 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    394 
    395 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    396 
    397 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    398 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    399 #endif /* WM_EVENT_COUNTERS */
    400 };
    401 
    402 struct wm_rxqueue {
    403 	kmutex_t *rxq_lock;		/* lock for rx operations */
    404 
    405 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    406 
    407 	/* Software state for the receive descriptors. */
    408 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    409 
    410 	/* RX control data structures. */
    411 	int rxq_ndesc;			/* must be a power of two */
    412 	size_t rxq_descsize;		/* a rx descriptor size */
    413 	rxdescs_t *rxq_descs_u;
    414 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    415 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     416 	int rxq_desc_rseg;		/* real number of control segments */
    417 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    418 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    419 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    420 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    421 
    422 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    423 
    424 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    425 	int rxq_discard;
    426 	int rxq_len;
    427 	struct mbuf *rxq_head;
    428 	struct mbuf *rxq_tail;
    429 	struct mbuf **rxq_tailp;
    430 
    431 	bool rxq_stopping;
    432 
    433 	uint32_t rxq_packets;		/* for AIM */
    434 	uint32_t rxq_bytes;		/* for AIM */
    435 #ifdef WM_EVENT_COUNTERS
    436 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    437 	WM_Q_EVCNT_DEFINE(rxq, rxdefer);	/* Rx deferred processing */
    438 
    439 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    440 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    441 #endif
    442 };
    443 
    444 struct wm_queue {
     445 	int wmq_id;			/* index of TX and RX queues */
     446 	int wmq_intr_idx;		/* index into the MSI-X table */
    447 
    448 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    449 	bool wmq_set_itr;
    450 
    451 	struct wm_txqueue wmq_txq;
    452 	struct wm_rxqueue wmq_rxq;
    453 
    454 	void *wmq_si;
    455 };
    456 
    457 struct wm_phyop {
    458 	int (*acquire)(struct wm_softc *);
    459 	void (*release)(struct wm_softc *);
    460 	int reset_delay_us;
    461 };
    462 
    463 struct wm_nvmop {
    464 	int (*acquire)(struct wm_softc *);
    465 	void (*release)(struct wm_softc *);
    466 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    467 };
    468 
    469 /*
    470  * Software state per device.
    471  */
    472 struct wm_softc {
    473 	device_t sc_dev;		/* generic device information */
    474 	bus_space_tag_t sc_st;		/* bus space tag */
    475 	bus_space_handle_t sc_sh;	/* bus space handle */
    476 	bus_size_t sc_ss;		/* bus space size */
    477 	bus_space_tag_t sc_iot;		/* I/O space tag */
    478 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    479 	bus_size_t sc_ios;		/* I/O space size */
    480 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    481 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    482 	bus_size_t sc_flashs;		/* flash registers space size */
    483 	off_t sc_flashreg_offset;	/*
    484 					 * offset to flash registers from
    485 					 * start of BAR
    486 					 */
    487 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    488 
    489 	struct ethercom sc_ethercom;	/* ethernet common data */
    490 	struct mii_data sc_mii;		/* MII/media information */
    491 
    492 	pci_chipset_tag_t sc_pc;
    493 	pcitag_t sc_pcitag;
    494 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    495 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    496 
    497 	uint16_t sc_pcidevid;		/* PCI device ID */
    498 	wm_chip_type sc_type;		/* MAC type */
    499 	int sc_rev;			/* MAC revision */
    500 	wm_phy_type sc_phytype;		/* PHY type */
    501 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    502 #define	WM_MEDIATYPE_UNKNOWN		0x00
    503 #define	WM_MEDIATYPE_FIBER		0x01
    504 #define	WM_MEDIATYPE_COPPER		0x02
    505 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    506 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    507 	int sc_flags;			/* flags; see below */
    508 	int sc_if_flags;		/* last if_flags */
    509 	int sc_flowflags;		/* 802.3x flow control flags */
    510 	int sc_align_tweak;
    511 
    512 	void *sc_ihs[WM_MAX_NINTR];	/*
    513 					 * interrupt cookie.
    514 					 * - legacy and msi use sc_ihs[0] only
    515 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    516 					 */
    517 	pci_intr_handle_t *sc_intrs;	/*
    518 					 * legacy and msi use sc_intrs[0] only
     519 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    520 					 */
    521 	int sc_nintrs;			/* number of interrupts */
    522 
    523 	int sc_link_intr_idx;		/* index of MSI-X tables */
    524 
    525 	callout_t sc_tick_ch;		/* tick callout */
    526 	bool sc_core_stopping;
    527 
    528 	int sc_nvm_ver_major;
    529 	int sc_nvm_ver_minor;
    530 	int sc_nvm_ver_build;
    531 	int sc_nvm_addrbits;		/* NVM address bits */
    532 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    533 	int sc_ich8_flash_base;
    534 	int sc_ich8_flash_bank_size;
    535 	int sc_nvm_k1_enabled;
    536 
    537 	int sc_nqueues;
    538 	struct wm_queue *sc_queue;
    539 	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
    540 	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
    541 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    542 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    543 
    544 	int sc_affinity_offset;
    545 
    546 #ifdef WM_EVENT_COUNTERS
    547 	/* Event counters. */
    548 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    549 
     550 	/* WM_T_82542_2_1 only */
    551 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    552 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    553 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    554 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    555 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    556 #endif /* WM_EVENT_COUNTERS */
    557 
     558 	/* This variable is used only on the 82547. */
    559 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    560 
    561 	uint32_t sc_ctrl;		/* prototype CTRL register */
    562 #if 0
    563 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    564 #endif
    565 	uint32_t sc_icr;		/* prototype interrupt bits */
    566 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    567 	uint32_t sc_tctl;		/* prototype TCTL register */
    568 	uint32_t sc_rctl;		/* prototype RCTL register */
    569 	uint32_t sc_txcw;		/* prototype TXCW register */
    570 	uint32_t sc_tipg;		/* prototype TIPG register */
    571 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    572 	uint32_t sc_pba;		/* prototype PBA register */
    573 
    574 	int sc_tbi_linkup;		/* TBI link status */
    575 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    576 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    577 
    578 	int sc_mchash_type;		/* multicast filter offset */
    579 
    580 	krndsource_t rnd_source;	/* random source */
    581 
    582 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    583 
    584 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    585 	kmutex_t *sc_ich_phymtx;	/*
    586 					 * 82574/82583/ICH/PCH specific PHY
    587 					 * mutex. For 82574/82583, the mutex
    588 					 * is used for both PHY and NVM.
    589 					 */
    590 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    591 
    592 	struct wm_phyop phy;
    593 	struct wm_nvmop nvm;
    594 };
    595 
     596 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
     597 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
    598 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    599 
    600 #define	WM_RXCHAIN_RESET(rxq)						\
    601 do {									\
    602 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    603 	*(rxq)->rxq_tailp = NULL;					\
    604 	(rxq)->rxq_len = 0;						\
    605 } while (/*CONSTCOND*/0)
    606 
    607 #define	WM_RXCHAIN_LINK(rxq, m)						\
    608 do {									\
    609 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    610 	(rxq)->rxq_tailp = &(m)->m_next;				\
    611 } while (/*CONSTCOND*/0)
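/*
 * A minimal sketch (not a quote from this driver) of how the two
 * macros above cooperate: rxq_tailp always points at the m_next slot
 * of the most recently linked mbuf, so appending stays O(1).
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head chain is now empty */
	WM_RXCHAIN_LINK(rxq, m0);	/* rxq_head == m0, rxq_tail == m0 */
	WM_RXCHAIN_LINK(rxq, m1);	/* m0->m_next == m1, rxq_tail == m1 */
#endif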
    612 
    613 #ifdef WM_EVENT_COUNTERS
    614 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    615 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    616 
    617 #define WM_Q_EVCNT_INCR(qname, evname)			\
    618 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    619 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    620 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    621 #else /* !WM_EVENT_COUNTERS */
    622 #define	WM_EVCNT_INCR(ev)	/* nothing */
    623 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    624 
    625 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    626 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    627 #endif /* !WM_EVENT_COUNTERS */
    628 
    629 #define	CSR_READ(sc, reg)						\
    630 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    631 #define	CSR_WRITE(sc, reg, val)						\
    632 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    633 #define	CSR_WRITE_FLUSH(sc)						\
    634 	(void) CSR_READ((sc), WMREG_STATUS)
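/*
 * CSR_WRITE_FLUSH() forces posted writes out to the device by doing a
 * harmless read of the STATUS register.  A typical pattern (a sketch,
 * not a quote from this driver) writes a register and then flushes so
 * the write takes effect before the code proceeds:
 */
#if 0
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	/* disable all interrupts */
	CSR_WRITE_FLUSH(sc);			/* push the posted write */
#endif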
    635 
    636 #define ICH8_FLASH_READ32(sc, reg)					\
    637 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    638 	    (reg) + sc->sc_flashreg_offset)
    639 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    640 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    641 	    (reg) + sc->sc_flashreg_offset, (data))
    642 
    643 #define ICH8_FLASH_READ16(sc, reg)					\
    644 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    645 	    (reg) + sc->sc_flashreg_offset)
    646 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    647 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    648 	    (reg) + sc->sc_flashreg_offset, (data))
    649 
    650 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    651 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    652 
    653 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    654 #define	WM_CDTXADDR_HI(txq, x)						\
    655 	(sizeof(bus_addr_t) == 8 ?					\
    656 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    657 
    658 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    659 #define	WM_CDRXADDR_HI(rxq, x)						\
    660 	(sizeof(bus_addr_t) == 8 ?					\
    661 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
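/*
 * Worked example for the LO/HI split above: with a 64-bit bus_addr_t of
 * 0x123456780, the LO macro yields 0x23456780 and the HI macro yields
 * 0x1; with a 32-bit bus_addr_t the HI half is always 0.
 */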
    662 
    663 /*
    664  * Register read/write functions.
    665  * Other than CSR_{READ|WRITE}().
    666  */
    667 #if 0
    668 static inline uint32_t wm_io_read(struct wm_softc *, int);
    669 #endif
    670 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    671 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    672 	uint32_t, uint32_t);
    673 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    674 
    675 /*
    676  * Descriptor sync/init functions.
    677  */
    678 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    679 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    680 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    681 
    682 /*
    683  * Device driver interface functions and commonly used functions.
    684  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    685  */
    686 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    687 static int	wm_match(device_t, cfdata_t, void *);
    688 static void	wm_attach(device_t, device_t, void *);
    689 static int	wm_detach(device_t, int);
    690 static bool	wm_suspend(device_t, const pmf_qual_t *);
    691 static bool	wm_resume(device_t, const pmf_qual_t *);
    692 static void	wm_watchdog(struct ifnet *);
    693 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *, uint16_t *);
    694 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *, uint16_t *);
    695 static void	wm_tick(void *);
    696 static int	wm_ifflags_cb(struct ethercom *);
    697 static int	wm_ioctl(struct ifnet *, u_long, void *);
    698 /* MAC address related */
    699 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    700 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    701 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    702 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    703 static void	wm_set_filter(struct wm_softc *);
    704 /* Reset and init related */
    705 static void	wm_set_vlan(struct wm_softc *);
    706 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    707 static void	wm_get_auto_rd_done(struct wm_softc *);
    708 static void	wm_lan_init_done(struct wm_softc *);
    709 static void	wm_get_cfg_done(struct wm_softc *);
    710 static void	wm_phy_post_reset(struct wm_softc *);
    711 static void	wm_write_smbus_addr(struct wm_softc *);
    712 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    713 static void	wm_initialize_hardware_bits(struct wm_softc *);
    714 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    715 static void	wm_reset_phy(struct wm_softc *);
    716 static void	wm_flush_desc_rings(struct wm_softc *);
    717 static void	wm_reset(struct wm_softc *);
    718 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    719 static void	wm_rxdrain(struct wm_rxqueue *);
    720 static void	wm_init_rss(struct wm_softc *);
    721 static void	wm_adjust_qnum(struct wm_softc *, int);
    722 static inline bool	wm_is_using_msix(struct wm_softc *);
    723 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    724 static int	wm_softint_establish(struct wm_softc *, int, int);
    725 static int	wm_setup_legacy(struct wm_softc *);
    726 static int	wm_setup_msix(struct wm_softc *);
    727 static int	wm_init(struct ifnet *);
    728 static int	wm_init_locked(struct ifnet *);
    729 static void	wm_unset_stopping_flags(struct wm_softc *);
    730 static void	wm_set_stopping_flags(struct wm_softc *);
    731 static void	wm_stop(struct ifnet *, int);
    732 static void	wm_stop_locked(struct ifnet *, int);
    733 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    734 static void	wm_82547_txfifo_stall(void *);
    735 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    736 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    737 /* DMA related */
    738 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    739 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    740 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    741 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    742     struct wm_txqueue *);
    743 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    744 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    745 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    746     struct wm_rxqueue *);
    747 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    748 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    749 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    750 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    751 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    752 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    753 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    754     struct wm_txqueue *);
    755 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    756     struct wm_rxqueue *);
    757 static int	wm_alloc_txrx_queues(struct wm_softc *);
    758 static void	wm_free_txrx_queues(struct wm_softc *);
    759 static int	wm_init_txrx_queues(struct wm_softc *);
    760 /* Start */
    761 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    762     struct wm_txsoft *, uint32_t *, uint8_t *);
    763 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    764 static void	wm_start(struct ifnet *);
    765 static void	wm_start_locked(struct ifnet *);
    766 static int	wm_transmit(struct ifnet *, struct mbuf *);
    767 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    768 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    769 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    770     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    771 static void	wm_nq_start(struct ifnet *);
    772 static void	wm_nq_start_locked(struct ifnet *);
    773 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    774 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    775 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    776 static void	wm_deferred_start_locked(struct wm_txqueue *);
    777 static void	wm_handle_queue(void *);
    778 /* Interrupt */
    779 static bool	wm_txeof(struct wm_txqueue *, u_int);
    780 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    781 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    782 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    783 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    784 static void	wm_linkintr(struct wm_softc *, uint32_t);
    785 static int	wm_intr_legacy(void *);
    786 static inline void	wm_txrxintr_disable(struct wm_queue *);
    787 static inline void	wm_txrxintr_enable(struct wm_queue *);
    788 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    789 static int	wm_txrxintr_msix(void *);
    790 static int	wm_linkintr_msix(void *);
    791 
    792 /*
    793  * Media related.
    794  * GMII, SGMII, TBI, SERDES and SFP.
    795  */
    796 /* Common */
    797 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    798 /* GMII related */
    799 static void	wm_gmii_reset(struct wm_softc *);
    800 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    801 static int	wm_get_phy_id_82575(struct wm_softc *);
    802 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    803 static int	wm_gmii_mediachange(struct ifnet *);
    804 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    805 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    806 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    807 static int	wm_gmii_i82543_readreg(device_t, int, int);
    808 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    809 static int	wm_gmii_mdic_readreg(device_t, int, int);
    810 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    811 static int	wm_gmii_i82544_readreg(device_t, int, int);
    812 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    813 static int	wm_gmii_i80003_readreg(device_t, int, int);
    814 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    815 static int	wm_gmii_bm_readreg(device_t, int, int);
    816 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    817 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    818 static int	wm_gmii_hv_readreg(device_t, int, int);
    819 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    820 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    821 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    822 static int	wm_gmii_82580_readreg(device_t, int, int);
    823 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    824 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    825 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    826 static void	wm_gmii_statchg(struct ifnet *);
    827 /*
     828  * Kumeran related (80003, ICH* and PCH*).
     829  * These functions are not for accessing MII registers but for accessing
     830  * Kumeran-specific registers.
    831  */
    832 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    833 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    834 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    835 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    836 /* SGMII */
    837 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    838 static int	wm_sgmii_readreg(device_t, int, int);
    839 static void	wm_sgmii_writereg(device_t, int, int, int);
    840 /* TBI related */
    841 static void	wm_tbi_mediainit(struct wm_softc *);
    842 static int	wm_tbi_mediachange(struct ifnet *);
    843 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    844 static int	wm_check_for_link(struct wm_softc *);
    845 static void	wm_tbi_tick(struct wm_softc *);
    846 /* SERDES related */
    847 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    848 static int	wm_serdes_mediachange(struct ifnet *);
    849 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    850 static void	wm_serdes_tick(struct wm_softc *);
    851 /* SFP related */
    852 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    853 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    854 
    855 /*
    856  * NVM related.
    857  * Microwire, SPI (w/wo EERD) and Flash.
    858  */
    859 /* Misc functions */
    860 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    861 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    862 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    863 /* Microwire */
    864 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    865 /* SPI */
    866 static int	wm_nvm_ready_spi(struct wm_softc *);
    867 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    868 /* Using with EERD */
    869 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    870 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    871 /* Flash */
    872 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    873     unsigned int *);
    874 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    875 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    876 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    877 	uint32_t *);
    878 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    879 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    880 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    881 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    882 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    883 /* iNVM */
    884 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    885 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    886 /* Lock, detecting NVM type, validate checksum and read */
    887 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    888 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    889 static int	wm_nvm_validate_checksum(struct wm_softc *);
    890 static void	wm_nvm_version_invm(struct wm_softc *);
    891 static void	wm_nvm_version(struct wm_softc *);
    892 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    893 
    894 /*
    895  * Hardware semaphores.
     896  * Very complex...
    897  */
    898 static int	wm_get_null(struct wm_softc *);
    899 static void	wm_put_null(struct wm_softc *);
    900 static int	wm_get_eecd(struct wm_softc *);
    901 static void	wm_put_eecd(struct wm_softc *);
    902 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    903 static void	wm_put_swsm_semaphore(struct wm_softc *);
    904 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    905 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    906 static int	wm_get_nvm_80003(struct wm_softc *);
    907 static void	wm_put_nvm_80003(struct wm_softc *);
    908 static int	wm_get_nvm_82571(struct wm_softc *);
    909 static void	wm_put_nvm_82571(struct wm_softc *);
    910 static int	wm_get_phy_82575(struct wm_softc *);
    911 static void	wm_put_phy_82575(struct wm_softc *);
    912 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    913 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    914 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    915 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    916 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    917 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    918 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    919 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    920 
    921 /*
    922  * Management mode and power management related subroutines.
    923  * BMC, AMT, suspend/resume and EEE.
    924  */
    925 #if 0
    926 static int	wm_check_mng_mode(struct wm_softc *);
    927 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    928 static int	wm_check_mng_mode_82574(struct wm_softc *);
    929 static int	wm_check_mng_mode_generic(struct wm_softc *);
    930 #endif
    931 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    932 static bool	wm_phy_resetisblocked(struct wm_softc *);
    933 static void	wm_get_hw_control(struct wm_softc *);
    934 static void	wm_release_hw_control(struct wm_softc *);
    935 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    936 static void	wm_smbustopci(struct wm_softc *);
    937 static void	wm_init_manageability(struct wm_softc *);
    938 static void	wm_release_manageability(struct wm_softc *);
    939 static void	wm_get_wakeup(struct wm_softc *);
    940 static void	wm_ulp_disable(struct wm_softc *);
    941 static void	wm_enable_phy_wakeup(struct wm_softc *);
    942 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    943 static void	wm_enable_wakeup(struct wm_softc *);
    944 static void	wm_disable_aspm(struct wm_softc *);
    945 /* LPLU (Low Power Link Up) */
    946 static void	wm_lplu_d0_disable(struct wm_softc *);
    947 /* EEE */
    948 static void	wm_set_eee_i350(struct wm_softc *);
    949 
    950 /*
    951  * Workarounds (mainly PHY related).
    952  * Basically, PHY's workarounds are in the PHY drivers.
    953  */
    954 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    955 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    956 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    957 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    958 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    959 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    960 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    961 static void	wm_reset_init_script_82575(struct wm_softc *);
    962 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    963 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    964 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    965 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    966 static void	wm_pll_workaround_i210(struct wm_softc *);
    967 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    968 
    969 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    970     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    971 
    972 /*
    973  * Devices supported by this driver.
    974  */
    975 static const struct wm_product {
    976 	pci_vendor_id_t		wmp_vendor;
    977 	pci_product_id_t	wmp_product;
    978 	const char		*wmp_name;
    979 	wm_chip_type		wmp_type;
    980 	uint32_t		wmp_flags;
    981 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    982 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    983 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    984 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    985 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    986 } wm_products[] = {
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    988 	  "Intel i82542 1000BASE-X Ethernet",
    989 	  WM_T_82542_2_1,	WMP_F_FIBER },
    990 
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    992 	  "Intel i82543GC 1000BASE-X Ethernet",
    993 	  WM_T_82543,		WMP_F_FIBER },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    996 	  "Intel i82543GC 1000BASE-T Ethernet",
    997 	  WM_T_82543,		WMP_F_COPPER },
    998 
    999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1000 	  "Intel i82544EI 1000BASE-T Ethernet",
   1001 	  WM_T_82544,		WMP_F_COPPER },
   1002 
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1004 	  "Intel i82544EI 1000BASE-X Ethernet",
   1005 	  WM_T_82544,		WMP_F_FIBER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1008 	  "Intel i82544GC 1000BASE-T Ethernet",
   1009 	  WM_T_82544,		WMP_F_COPPER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1012 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1013 	  WM_T_82544,		WMP_F_COPPER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1016 	  "Intel i82540EM 1000BASE-T Ethernet",
   1017 	  WM_T_82540,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1020 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1021 	  WM_T_82540,		WMP_F_COPPER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1024 	  "Intel i82540EP 1000BASE-T Ethernet",
   1025 	  WM_T_82540,		WMP_F_COPPER },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1028 	  "Intel i82540EP 1000BASE-T Ethernet",
   1029 	  WM_T_82540,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1032 	  "Intel i82540EP 1000BASE-T Ethernet",
   1033 	  WM_T_82540,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1036 	  "Intel i82545EM 1000BASE-T Ethernet",
   1037 	  WM_T_82545,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1040 	  "Intel i82545GM 1000BASE-T Ethernet",
   1041 	  WM_T_82545_3,		WMP_F_COPPER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1044 	  "Intel i82545GM 1000BASE-X Ethernet",
   1045 	  WM_T_82545_3,		WMP_F_FIBER },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1048 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1049 	  WM_T_82545_3,		WMP_F_SERDES },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1052 	  "Intel i82546EB 1000BASE-T Ethernet",
   1053 	  WM_T_82546,		WMP_F_COPPER },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1056 	  "Intel i82546EB 1000BASE-T Ethernet",
   1057 	  WM_T_82546,		WMP_F_COPPER },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1060 	  "Intel i82545EM 1000BASE-X Ethernet",
   1061 	  WM_T_82545,		WMP_F_FIBER },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1064 	  "Intel i82546EB 1000BASE-X Ethernet",
   1065 	  WM_T_82546,		WMP_F_FIBER },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1068 	  "Intel i82546GB 1000BASE-T Ethernet",
   1069 	  WM_T_82546_3,		WMP_F_COPPER },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1072 	  "Intel i82546GB 1000BASE-X Ethernet",
   1073 	  WM_T_82546_3,		WMP_F_FIBER },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1076 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1077 	  WM_T_82546_3,		WMP_F_SERDES },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1080 	  "i82546GB quad-port Gigabit Ethernet",
   1081 	  WM_T_82546_3,		WMP_F_COPPER },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1084 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1085 	  WM_T_82546_3,		WMP_F_COPPER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1088 	  "Intel PRO/1000MT (82546GB)",
   1089 	  WM_T_82546_3,		WMP_F_COPPER },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1092 	  "Intel i82541EI 1000BASE-T Ethernet",
   1093 	  WM_T_82541,		WMP_F_COPPER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1096 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1097 	  WM_T_82541,		WMP_F_COPPER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1100 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1101 	  WM_T_82541,		WMP_F_COPPER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1104 	  "Intel i82541ER 1000BASE-T Ethernet",
   1105 	  WM_T_82541_2,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1108 	  "Intel i82541GI 1000BASE-T Ethernet",
   1109 	  WM_T_82541_2,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1112 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1113 	  WM_T_82541_2,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1116 	  "Intel i82541PI 1000BASE-T Ethernet",
   1117 	  WM_T_82541_2,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1120 	  "Intel i82547EI 1000BASE-T Ethernet",
   1121 	  WM_T_82547,		WMP_F_COPPER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1124 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1125 	  WM_T_82547,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1128 	  "Intel i82547GI 1000BASE-T Ethernet",
   1129 	  WM_T_82547_2,		WMP_F_COPPER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1132 	  "Intel PRO/1000 PT (82571EB)",
   1133 	  WM_T_82571,		WMP_F_COPPER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1136 	  "Intel PRO/1000 PF (82571EB)",
   1137 	  WM_T_82571,		WMP_F_FIBER },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1140 	  "Intel PRO/1000 PB (82571EB)",
   1141 	  WM_T_82571,		WMP_F_SERDES },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1144 	  "Intel PRO/1000 QT (82571EB)",
   1145 	  WM_T_82571,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1148 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1149 	  WM_T_82571,		WMP_F_COPPER, },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1152 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1153 	  WM_T_82571,		WMP_F_COPPER, },
   1154 
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1156 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1157 	  WM_T_82571,		WMP_F_SERDES, },
   1158 
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1160 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1161 	  WM_T_82571,		WMP_F_SERDES, },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1164 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1165 	  WM_T_82571,		WMP_F_FIBER, },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1168 	  "Intel i82572EI 1000baseT Ethernet",
   1169 	  WM_T_82572,		WMP_F_COPPER },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1172 	  "Intel i82572EI 1000baseX Ethernet",
   1173 	  WM_T_82572,		WMP_F_FIBER },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1176 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1177 	  WM_T_82572,		WMP_F_SERDES },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1180 	  "Intel i82572EI 1000baseT Ethernet",
   1181 	  WM_T_82572,		WMP_F_COPPER },
   1182 
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1184 	  "Intel i82573E",
   1185 	  WM_T_82573,		WMP_F_COPPER },
   1186 
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1188 	  "Intel i82573E IAMT",
   1189 	  WM_T_82573,		WMP_F_COPPER },
   1190 
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1192 	  "Intel i82573L Gigabit Ethernet",
   1193 	  WM_T_82573,		WMP_F_COPPER },
   1194 
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1196 	  "Intel i82574L",
   1197 	  WM_T_82574,		WMP_F_COPPER },
   1198 
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1200 	  "Intel i82574L",
   1201 	  WM_T_82574,		WMP_F_COPPER },
   1202 
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1204 	  "Intel i82583V",
   1205 	  WM_T_82583,		WMP_F_COPPER },
   1206 
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1208 	  "i80003 dual 1000baseT Ethernet",
   1209 	  WM_T_80003,		WMP_F_COPPER },
   1210 
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1212 	  "i80003 dual 1000baseX Ethernet",
   1213 	  WM_T_80003,		WMP_F_COPPER },
   1214 
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1216 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1217 	  WM_T_80003,		WMP_F_SERDES },
   1218 
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1220 	  "Intel i80003 1000baseT Ethernet",
   1221 	  WM_T_80003,		WMP_F_COPPER },
   1222 
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1224 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1225 	  WM_T_80003,		WMP_F_SERDES },
   1226 
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1228 	  "Intel i82801H (M_AMT) LAN Controller",
   1229 	  WM_T_ICH8,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1231 	  "Intel i82801H (AMT) LAN Controller",
   1232 	  WM_T_ICH8,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1234 	  "Intel i82801H LAN Controller",
   1235 	  WM_T_ICH8,		WMP_F_COPPER },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1237 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1238 	  WM_T_ICH8,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1240 	  "Intel i82801H (M) LAN Controller",
   1241 	  WM_T_ICH8,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1243 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1244 	  WM_T_ICH8,		WMP_F_COPPER },
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1246 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1247 	  WM_T_ICH8,		WMP_F_COPPER },
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1249 	  "82567V-3 LAN Controller",
   1250 	  WM_T_ICH8,		WMP_F_COPPER },
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1252 	  "82801I (AMT) LAN Controller",
   1253 	  WM_T_ICH9,		WMP_F_COPPER },
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1255 	  "82801I 10/100 LAN Controller",
   1256 	  WM_T_ICH9,		WMP_F_COPPER },
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1258 	  "82801I (G) 10/100 LAN Controller",
   1259 	  WM_T_ICH9,		WMP_F_COPPER },
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1261 	  "82801I (GT) 10/100 LAN Controller",
   1262 	  WM_T_ICH9,		WMP_F_COPPER },
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1264 	  "82801I (C) LAN Controller",
   1265 	  WM_T_ICH9,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1267 	  "82801I mobile LAN Controller",
   1268 	  WM_T_ICH9,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1270 	  "82801I mobile (V) LAN Controller",
   1271 	  WM_T_ICH9,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1273 	  "82801I mobile (AMT) LAN Controller",
   1274 	  WM_T_ICH9,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1276 	  "82567LM-4 LAN Controller",
   1277 	  WM_T_ICH9,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1279 	  "82567LM-2 LAN Controller",
   1280 	  WM_T_ICH10,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1282 	  "82567LF-2 LAN Controller",
   1283 	  WM_T_ICH10,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1285 	  "82567LM-3 LAN Controller",
   1286 	  WM_T_ICH10,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1288 	  "82567LF-3 LAN Controller",
   1289 	  WM_T_ICH10,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1291 	  "82567V-2 LAN Controller",
   1292 	  WM_T_ICH10,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1294 	  "82567V-3? LAN Controller",
   1295 	  WM_T_ICH10,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1297 	  "HANKSVILLE LAN Controller",
   1298 	  WM_T_ICH10,		WMP_F_COPPER },
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1300 	  "PCH LAN (82577LM) Controller",
   1301 	  WM_T_PCH,		WMP_F_COPPER },
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1303 	  "PCH LAN (82577LC) Controller",
   1304 	  WM_T_PCH,		WMP_F_COPPER },
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1306 	  "PCH LAN (82578DM) Controller",
   1307 	  WM_T_PCH,		WMP_F_COPPER },
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1309 	  "PCH LAN (82578DC) Controller",
   1310 	  WM_T_PCH,		WMP_F_COPPER },
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1312 	  "PCH2 LAN (82579LM) Controller",
   1313 	  WM_T_PCH2,		WMP_F_COPPER },
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1315 	  "PCH2 LAN (82579V) Controller",
   1316 	  WM_T_PCH2,		WMP_F_COPPER },
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1318 	  "82575EB dual-1000baseT Ethernet",
   1319 	  WM_T_82575,		WMP_F_COPPER },
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1321 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1322 	  WM_T_82575,		WMP_F_SERDES },
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1324 	  "82575GB quad-1000baseT Ethernet",
   1325 	  WM_T_82575,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1327 	  "82575GB quad-1000baseT Ethernet (PM)",
   1328 	  WM_T_82575,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1330 	  "82576 1000BaseT Ethernet",
   1331 	  WM_T_82576,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1333 	  "82576 1000BaseX Ethernet",
   1334 	  WM_T_82576,		WMP_F_FIBER },
   1335 
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1337 	  "82576 gigabit Ethernet (SERDES)",
   1338 	  WM_T_82576,		WMP_F_SERDES },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1341 	  "82576 quad-1000BaseT Ethernet",
   1342 	  WM_T_82576,		WMP_F_COPPER },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1345 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1346 	  WM_T_82576,		WMP_F_COPPER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1349 	  "82576 gigabit Ethernet",
   1350 	  WM_T_82576,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1353 	  "82576 gigabit Ethernet (SERDES)",
   1354 	  WM_T_82576,		WMP_F_SERDES },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1356 	  "82576 quad-gigabit Ethernet (SERDES)",
   1357 	  WM_T_82576,		WMP_F_SERDES },
   1358 
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1360 	  "82580 1000BaseT Ethernet",
   1361 	  WM_T_82580,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1363 	  "82580 1000BaseX Ethernet",
   1364 	  WM_T_82580,		WMP_F_FIBER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1367 	  "82580 1000BaseT Ethernet (SERDES)",
   1368 	  WM_T_82580,		WMP_F_SERDES },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1371 	  "82580 gigabit Ethernet (SGMII)",
   1372 	  WM_T_82580,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1374 	  "82580 dual-1000BaseT Ethernet",
   1375 	  WM_T_82580,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1378 	  "82580 quad-1000BaseX Ethernet",
   1379 	  WM_T_82580,		WMP_F_FIBER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1382 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1383 	  WM_T_82580,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1386 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1387 	  WM_T_82580,		WMP_F_SERDES },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1390 	  "DH89XXCC 1000BASE-KX Ethernet",
   1391 	  WM_T_82580,		WMP_F_SERDES },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1394 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1395 	  WM_T_82580,		WMP_F_SERDES },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1398 	  "I350 Gigabit Network Connection",
   1399 	  WM_T_I350,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1402 	  "I350 Gigabit Fiber Network Connection",
   1403 	  WM_T_I350,		WMP_F_FIBER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1406 	  "I350 Gigabit Backplane Connection",
   1407 	  WM_T_I350,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1410 	  "I350 Quad Port Gigabit Ethernet",
   1411 	  WM_T_I350,		WMP_F_SERDES },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1414 	  "I350 Gigabit Connection",
   1415 	  WM_T_I350,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1418 	  "I354 Gigabit Ethernet (KX)",
   1419 	  WM_T_I354,		WMP_F_SERDES },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1422 	  "I354 Gigabit Ethernet (SGMII)",
   1423 	  WM_T_I354,		WMP_F_COPPER },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1426 	  "I354 Gigabit Ethernet (2.5G)",
   1427 	  WM_T_I354,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1430 	  "I210-T1 Ethernet Server Adapter",
   1431 	  WM_T_I210,		WMP_F_COPPER },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1434 	  "I210 Ethernet (Copper OEM)",
   1435 	  WM_T_I210,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1438 	  "I210 Ethernet (Copper IT)",
   1439 	  WM_T_I210,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1442 	  "I210 Ethernet (FLASH less)",
   1443 	  WM_T_I210,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1446 	  "I210 Gigabit Ethernet (Fiber)",
   1447 	  WM_T_I210,		WMP_F_FIBER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1450 	  "I210 Gigabit Ethernet (SERDES)",
   1451 	  WM_T_I210,		WMP_F_SERDES },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1454 	  "I210 Gigabit Ethernet (FLASH less)",
   1455 	  WM_T_I210,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1458 	  "I210 Gigabit Ethernet (SGMII)",
   1459 	  WM_T_I210,		WMP_F_COPPER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1462 	  "I211 Ethernet (COPPER)",
   1463 	  WM_T_I211,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1465 	  "I217 V Ethernet Connection",
   1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1468 	  "I217 LM Ethernet Connection",
   1469 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1471 	  "I218 V Ethernet Connection",
   1472 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1474 	  "I218 V Ethernet Connection",
   1475 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1477 	  "I218 V Ethernet Connection",
   1478 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1480 	  "I218 LM Ethernet Connection",
   1481 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1483 	  "I218 LM Ethernet Connection",
   1484 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1486 	  "I218 LM Ethernet Connection",
   1487 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1488 #if 0
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1490 	  "I219 V Ethernet Connection",
   1491 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1493 	  "I219 V Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1496 	  "I219 V Ethernet Connection",
   1497 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1499 	  "I219 V Ethernet Connection",
   1500 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1502 	  "I219 LM Ethernet Connection",
   1503 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1505 	  "I219 LM Ethernet Connection",
   1506 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1508 	  "I219 LM Ethernet Connection",
   1509 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1511 	  "I219 LM Ethernet Connection",
   1512 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1514 	  "I219 LM Ethernet Connection",
   1515 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1517 	  "I219 V Ethernet Connection",
   1518 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1520 	  "I219 V Ethernet Connection",
   1521 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1523 	  "I219 LM Ethernet Connection",
   1524 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1526 	  "I219 LM Ethernet Connection",
   1527 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1528 #endif
   1529 	{ 0,			0,
   1530 	  NULL,
   1531 	  0,			0 },
   1532 };
   1533 
   1534 /*
    1535  * Register read/write functions,
    1536  * other than CSR_{READ|WRITE}().
   1537  */
   1538 
   1539 #if 0 /* Not currently used */
   1540 static inline uint32_t
   1541 wm_io_read(struct wm_softc *sc, int reg)
   1542 {
   1543 
   1544 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1545 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1546 }
   1547 #endif
   1548 
   1549 static inline void
   1550 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1551 {
   1552 
   1553 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1554 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1555 }
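/*
 * Illustrative note: the two helpers above implement indirect register
 * access through the I/O BAR, which exposes an address window at offset
 * 0 and a data window at offset 4.  A call such as
 * wm_io_write(sc, WMREG_CTRL, ctrl), for example, first posts the CTRL
 * register offset into the address window and then writes the value
 * through the data window; WMREG_CTRL is only an example register here.
 */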
   1556 
   1557 static inline void
   1558 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1559     uint32_t data)
   1560 {
   1561 	uint32_t regval;
   1562 	int i;
   1563 
   1564 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1565 
   1566 	CSR_WRITE(sc, reg, regval);
   1567 
   1568 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1569 		delay(5);
   1570 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1571 			break;
   1572 	}
   1573 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1574 		aprint_error("%s: WARNING:"
   1575 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1576 		    device_xname(sc->sc_dev), reg);
   1577 	}
   1578 }
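/*
 * Note on the poll in wm_82575_write_8bit_ctlr_reg() above: the register
 * is re-read at 5us intervals until SCTL_CTL_READY is set, so the
 * warning fires after roughly 5 * SCTL_CTL_POLL_TIMEOUT microseconds at
 * worst.
 */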
   1579 
   1580 static inline void
   1581 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1582 {
   1583 	wa->wa_low = htole32(v & 0xffffffffU);
   1584 	if (sizeof(bus_addr_t) == 8)
   1585 		wa->wa_high = htole32((uint64_t) v >> 32);
   1586 	else
   1587 		wa->wa_high = 0;
   1588 }
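/*
 * Worked example for wm_set_dma_addr() (illustrative value only): with
 * a 64-bit bus_addr_t, v = 0x123456789 is stored as wa_low = 0x23456789
 * and wa_high = 0x1, both in little-endian byte order; on a 32-bit
 * platform the high word is simply zero.
 */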
   1589 
   1590 /*
   1591  * Descriptor sync/init functions.
   1592  */
   1593 static inline void
   1594 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1595 {
   1596 	struct wm_softc *sc = txq->txq_sc;
   1597 
   1598 	/* If it will wrap around, sync to the end of the ring. */
   1599 	if ((start + num) > WM_NTXDESC(txq)) {
   1600 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1601 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1602 		    (WM_NTXDESC(txq) - start), ops);
   1603 		num -= (WM_NTXDESC(txq) - start);
   1604 		start = 0;
   1605 	}
   1606 
   1607 	/* Now sync whatever is left. */
   1608 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1609 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1610 }
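/*
 * Worked example for the wrap handling above (illustrative numbers
 * only): with a 256-descriptor ring, wm_cdtxsync(txq, 252, 8, ops)
 * issues two bus_dmamap_sync() calls, one covering descriptors 252-255
 * and a second covering descriptors 0-3.
 */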
   1611 
   1612 static inline void
   1613 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1614 {
   1615 	struct wm_softc *sc = rxq->rxq_sc;
   1616 
   1617 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1618 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1619 }
   1620 
   1621 static inline void
   1622 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1623 {
   1624 	struct wm_softc *sc = rxq->rxq_sc;
   1625 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1626 	struct mbuf *m = rxs->rxs_mbuf;
   1627 
   1628 	/*
   1629 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1630 	 * so that the payload after the Ethernet header is aligned
   1631 	 * to a 4-byte boundary.
    1632 	 *
   1633 	 * XXX BRAINDAMAGE ALERT!
   1634 	 * The stupid chip uses the same size for every buffer, which
   1635 	 * is set in the Receive Control register.  We are using the 2K
   1636 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1637 	 * reason, we can't "scoot" packets longer than the standard
   1638 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1639 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1640 	 * the upper layer copy the headers.
   1641 	 */
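	/*
	 * Example of the arithmetic (illustrative only): with
	 * sc_align_tweak = 2, the 14-byte Ethernet header starts at
	 * buffer offset 2, so the IP header that follows it lands at
	 * offset 16, a 4-byte boundary.
	 */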
   1642 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1643 
   1644 	if (sc->sc_type == WM_T_82574) {
   1645 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1646 		rxd->erx_data.erxd_addr =
   1647 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1648 		rxd->erx_data.erxd_dd = 0;
   1649 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1650 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1651 
   1652 		rxd->nqrx_data.nrxd_paddr =
   1653 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1654 		/* Currently, split header is not supported. */
   1655 		rxd->nqrx_data.nrxd_haddr = 0;
   1656 	} else {
   1657 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1658 
   1659 		wm_set_dma_addr(&rxd->wrx_addr,
   1660 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1661 		rxd->wrx_len = 0;
   1662 		rxd->wrx_cksum = 0;
   1663 		rxd->wrx_status = 0;
   1664 		rxd->wrx_errors = 0;
   1665 		rxd->wrx_special = 0;
   1666 	}
   1667 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1668 
   1669 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1670 }
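/*
 * Note: wm_init_rxdesc() above covers the three receive descriptor
 * layouts this driver knows about: extended descriptors on the 82574,
 * "new queue" descriptors on WM_F_NEWQUEUE (82575 and later) chips, and
 * the original wiseman layout everywhere else.  In every case the
 * hardware tail pointer (RDT) is advanced after the descriptor is
 * synced.
 */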
   1671 
   1672 /*
   1673  * Device driver interface functions and commonly used functions.
   1674  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1675  */
   1676 
   1677 /* Lookup supported device table */
   1678 static const struct wm_product *
   1679 wm_lookup(const struct pci_attach_args *pa)
   1680 {
   1681 	const struct wm_product *wmp;
   1682 
   1683 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1684 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1685 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1686 			return wmp;
   1687 	}
   1688 	return NULL;
   1689 }
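/*
 * Note: the loop above relies on the all-zero sentinel entry that
 * terminates wm_products (its wmp_name is NULL).
 */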
   1690 
   1691 /* The match function (ca_match) */
   1692 static int
   1693 wm_match(device_t parent, cfdata_t cf, void *aux)
   1694 {
   1695 	struct pci_attach_args *pa = aux;
   1696 
   1697 	if (wm_lookup(pa) != NULL)
   1698 		return 1;
   1699 
   1700 	return 0;
   1701 }
   1702 
   1703 /* The attach function (ca_attach) */
   1704 static void
   1705 wm_attach(device_t parent, device_t self, void *aux)
   1706 {
   1707 	struct wm_softc *sc = device_private(self);
   1708 	struct pci_attach_args *pa = aux;
   1709 	prop_dictionary_t dict;
   1710 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1711 	pci_chipset_tag_t pc = pa->pa_pc;
   1712 	int counts[PCI_INTR_TYPE_SIZE];
   1713 	pci_intr_type_t max_type;
   1714 	const char *eetype, *xname;
   1715 	bus_space_tag_t memt;
   1716 	bus_space_handle_t memh;
   1717 	bus_size_t memsize;
   1718 	int memh_valid;
   1719 	int i, error;
   1720 	const struct wm_product *wmp;
   1721 	prop_data_t ea;
   1722 	prop_number_t pn;
   1723 	uint8_t enaddr[ETHER_ADDR_LEN];
   1724 	char buf[256];
   1725 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1726 	pcireg_t preg, memtype;
   1727 	uint16_t eeprom_data, apme_mask;
   1728 	bool force_clear_smbi;
   1729 	uint32_t link_mode;
   1730 	uint32_t reg;
   1731 
   1732 	sc->sc_dev = self;
   1733 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1734 	sc->sc_core_stopping = false;
   1735 
   1736 	wmp = wm_lookup(pa);
   1737 #ifdef DIAGNOSTIC
   1738 	if (wmp == NULL) {
   1739 		printf("\n");
   1740 		panic("wm_attach: impossible");
   1741 	}
   1742 #endif
   1743 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1744 
   1745 	sc->sc_pc = pa->pa_pc;
   1746 	sc->sc_pcitag = pa->pa_tag;
   1747 
   1748 	if (pci_dma64_available(pa))
   1749 		sc->sc_dmat = pa->pa_dmat64;
   1750 	else
   1751 		sc->sc_dmat = pa->pa_dmat;
   1752 
   1753 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1754 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1755 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1756 
   1757 	sc->sc_type = wmp->wmp_type;
   1758 
   1759 	/* Set default function pointers */
   1760 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1761 	sc->phy.release = sc->nvm.release = wm_put_null;
   1762 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1763 
   1764 	if (sc->sc_type < WM_T_82543) {
   1765 		if (sc->sc_rev < 2) {
   1766 			aprint_error_dev(sc->sc_dev,
   1767 			    "i82542 must be at least rev. 2\n");
   1768 			return;
   1769 		}
   1770 		if (sc->sc_rev < 3)
   1771 			sc->sc_type = WM_T_82542_2_0;
   1772 	}
   1773 
   1774 	/*
   1775 	 * Disable MSI for Errata:
   1776 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1777 	 *
   1778 	 *  82544: Errata 25
   1779 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1780 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1781 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1782 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1783 	 *
   1784 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1785 	 *
   1786 	 *  82571 & 82572: Errata 63
   1787 	 */
   1788 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1789 	    || (sc->sc_type == WM_T_82572))
   1790 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1791 
   1792 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1793 	    || (sc->sc_type == WM_T_82580)
   1794 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1795 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1796 		sc->sc_flags |= WM_F_NEWQUEUE;
   1797 
   1798 	/* Set device properties (mactype) */
   1799 	dict = device_properties(sc->sc_dev);
   1800 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1801 
   1802 	/*
    1803 	 * Map the device.  All devices support memory-mapped access,
   1804 	 * and it is really required for normal operation.
   1805 	 */
   1806 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1807 	switch (memtype) {
   1808 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1809 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1810 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1811 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1812 		break;
   1813 	default:
   1814 		memh_valid = 0;
   1815 		break;
   1816 	}
   1817 
   1818 	if (memh_valid) {
   1819 		sc->sc_st = memt;
   1820 		sc->sc_sh = memh;
   1821 		sc->sc_ss = memsize;
   1822 	} else {
   1823 		aprint_error_dev(sc->sc_dev,
   1824 		    "unable to map device registers\n");
   1825 		return;
   1826 	}
   1827 
   1828 	/*
   1829 	 * In addition, i82544 and later support I/O mapped indirect
   1830 	 * register access.  It is not desirable (nor supported in
   1831 	 * this driver) to use it for normal operation, though it is
   1832 	 * required to work around bugs in some chip versions.
   1833 	 */
   1834 	if (sc->sc_type >= WM_T_82544) {
   1835 		/* First we have to find the I/O BAR. */
   1836 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1837 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1838 			if (memtype == PCI_MAPREG_TYPE_IO)
   1839 				break;
   1840 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1841 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1842 				i += 4;	/* skip high bits, too */
   1843 		}
   1844 		if (i < PCI_MAPREG_END) {
   1845 			/*
    1846 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1847 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1848 			 * That's not a problem because those newer chips
    1849 			 * don't have this bug.
    1850 			 *
    1851 			 * The i8254x apparently doesn't respond when the
    1852 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1853 			 * been configured.
   1854 			 */
   1855 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1856 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1857 				aprint_error_dev(sc->sc_dev,
   1858 				    "WARNING: I/O BAR at zero.\n");
   1859 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1860 					0, &sc->sc_iot, &sc->sc_ioh,
   1861 					NULL, &sc->sc_ios) == 0) {
   1862 				sc->sc_flags |= WM_F_IOH_VALID;
   1863 			} else {
   1864 				aprint_error_dev(sc->sc_dev,
   1865 				    "WARNING: unable to map I/O space\n");
   1866 			}
   1867 		}
   1868 
   1869 	}
   1870 
   1871 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1872 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1873 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1874 	if (sc->sc_type < WM_T_82542_2_1)
   1875 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1876 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1877 
   1878 	/* power up chip */
   1879 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1880 	    NULL)) && error != EOPNOTSUPP) {
   1881 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1882 		return;
   1883 	}
   1884 
   1885 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1886 	/*
    1887 	 * Don't use MSI-X if we can use only one queue; this saves
    1888 	 * interrupt resources.
   1889 	 */
   1890 	if (sc->sc_nqueues > 1) {
   1891 		max_type = PCI_INTR_TYPE_MSIX;
   1892 		/*
    1893 		 * The 82583 has an MSI-X capability in its PCI configuration
    1894 		 * space but doesn't actually support MSI-X; at least the
    1895 		 * documentation says nothing about it.
   1896 		 */
   1897 		counts[PCI_INTR_TYPE_MSIX]
   1898 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1899 	} else {
   1900 		max_type = PCI_INTR_TYPE_MSI;
   1901 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1902 	}
   1903 
   1904 	/* Allocation settings */
   1905 	counts[PCI_INTR_TYPE_MSI] = 1;
   1906 	counts[PCI_INTR_TYPE_INTX] = 1;
   1907 	/* overridden by disable flags */
   1908 	if (wm_disable_msi != 0) {
   1909 		counts[PCI_INTR_TYPE_MSI] = 0;
   1910 		if (wm_disable_msix != 0) {
   1911 			max_type = PCI_INTR_TYPE_INTX;
   1912 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1913 		}
   1914 	} else if (wm_disable_msix != 0) {
   1915 		max_type = PCI_INTR_TYPE_MSI;
   1916 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1917 	}
   1918 
   1919 alloc_retry:
   1920 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1921 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1922 		return;
   1923 	}
   1924 
   1925 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1926 		error = wm_setup_msix(sc);
   1927 		if (error) {
   1928 			pci_intr_release(pc, sc->sc_intrs,
   1929 			    counts[PCI_INTR_TYPE_MSIX]);
   1930 
   1931 			/* Setup for MSI: Disable MSI-X */
   1932 			max_type = PCI_INTR_TYPE_MSI;
   1933 			counts[PCI_INTR_TYPE_MSI] = 1;
   1934 			counts[PCI_INTR_TYPE_INTX] = 1;
   1935 			goto alloc_retry;
   1936 		}
    1937 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1938 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1939 		error = wm_setup_legacy(sc);
   1940 		if (error) {
   1941 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1942 			    counts[PCI_INTR_TYPE_MSI]);
   1943 
   1944 			/* The next try is for INTx: Disable MSI */
   1945 			max_type = PCI_INTR_TYPE_INTX;
   1946 			counts[PCI_INTR_TYPE_INTX] = 1;
   1947 			goto alloc_retry;
   1948 		}
   1949 	} else {
   1950 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1951 		error = wm_setup_legacy(sc);
   1952 		if (error) {
   1953 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1954 			    counts[PCI_INTR_TYPE_INTX]);
   1955 			return;
   1956 		}
   1957 	}
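	/*
	 * To summarize the retry logic above: interrupt allocation falls
	 * back in the order MSI-X -> MSI -> INTx, releasing the previous
	 * allocation before each retry via the alloc_retry label.
	 */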
   1958 
   1959 	/*
   1960 	 * Check the function ID (unit number of the chip).
   1961 	 */
   1962 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1963 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1964 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1965 	    || (sc->sc_type == WM_T_82580)
   1966 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1967 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1968 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1969 	else
   1970 		sc->sc_funcid = 0;
   1971 
   1972 	/*
   1973 	 * Determine a few things about the bus we're connected to.
   1974 	 */
   1975 	if (sc->sc_type < WM_T_82543) {
   1976 		/* We don't really know the bus characteristics here. */
   1977 		sc->sc_bus_speed = 33;
   1978 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1979 		/*
   1980 		 * CSA (Communication Streaming Architecture) is about as fast
    1981 		 * as a 32-bit 66MHz PCI bus.
   1982 		 */
   1983 		sc->sc_flags |= WM_F_CSA;
   1984 		sc->sc_bus_speed = 66;
   1985 		aprint_verbose_dev(sc->sc_dev,
   1986 		    "Communication Streaming Architecture\n");
   1987 		if (sc->sc_type == WM_T_82547) {
   1988 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1989 			callout_setfunc(&sc->sc_txfifo_ch,
   1990 					wm_82547_txfifo_stall, sc);
   1991 			aprint_verbose_dev(sc->sc_dev,
   1992 			    "using 82547 Tx FIFO stall work-around\n");
   1993 		}
   1994 	} else if (sc->sc_type >= WM_T_82571) {
   1995 		sc->sc_flags |= WM_F_PCIE;
   1996 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1997 		    && (sc->sc_type != WM_T_ICH10)
   1998 		    && (sc->sc_type != WM_T_PCH)
   1999 		    && (sc->sc_type != WM_T_PCH2)
   2000 		    && (sc->sc_type != WM_T_PCH_LPT)
   2001 		    && (sc->sc_type != WM_T_PCH_SPT)
   2002 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2003 			/* ICH* and PCH* have no PCIe capability registers */
   2004 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2005 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2006 				NULL) == 0)
   2007 				aprint_error_dev(sc->sc_dev,
   2008 				    "unable to find PCIe capability\n");
   2009 		}
   2010 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2011 	} else {
   2012 		reg = CSR_READ(sc, WMREG_STATUS);
   2013 		if (reg & STATUS_BUS64)
   2014 			sc->sc_flags |= WM_F_BUS64;
   2015 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2016 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2017 
   2018 			sc->sc_flags |= WM_F_PCIX;
   2019 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2020 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2021 				aprint_error_dev(sc->sc_dev,
   2022 				    "unable to find PCIX capability\n");
   2023 			else if (sc->sc_type != WM_T_82545_3 &&
   2024 				 sc->sc_type != WM_T_82546_3) {
   2025 				/*
   2026 				 * Work around a problem caused by the BIOS
   2027 				 * setting the max memory read byte count
   2028 				 * incorrectly.
   2029 				 */
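				/*
				 * Worked example (illustrative values):
				 * if the BIOS programmed an MMRBC field
				 * of 3 (512 << 3 = 4096 bytes) but the
				 * device only allows a maximum of 1
				 * (512 << 1 = 1024 bytes), the code
				 * below rewrites PCIX_CMD to use 1024
				 * bytes.
				 */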
   2030 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2031 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2032 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2033 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2034 
   2035 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2036 				    PCIX_CMD_BYTECNT_SHIFT;
   2037 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2038 				    PCIX_STATUS_MAXB_SHIFT;
   2039 				if (bytecnt > maxb) {
   2040 					aprint_verbose_dev(sc->sc_dev,
   2041 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2042 					    512 << bytecnt, 512 << maxb);
   2043 					pcix_cmd = (pcix_cmd &
   2044 					    ~PCIX_CMD_BYTECNT_MASK) |
   2045 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2046 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2047 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2048 					    pcix_cmd);
   2049 				}
   2050 			}
   2051 		}
   2052 		/*
   2053 		 * The quad port adapter is special; it has a PCIX-PCIX
   2054 		 * bridge on the board, and can run the secondary bus at
   2055 		 * a higher speed.
   2056 		 */
   2057 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2058 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2059 								      : 66;
   2060 		} else if (sc->sc_flags & WM_F_PCIX) {
   2061 			switch (reg & STATUS_PCIXSPD_MASK) {
   2062 			case STATUS_PCIXSPD_50_66:
   2063 				sc->sc_bus_speed = 66;
   2064 				break;
   2065 			case STATUS_PCIXSPD_66_100:
   2066 				sc->sc_bus_speed = 100;
   2067 				break;
   2068 			case STATUS_PCIXSPD_100_133:
   2069 				sc->sc_bus_speed = 133;
   2070 				break;
   2071 			default:
   2072 				aprint_error_dev(sc->sc_dev,
   2073 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2074 				    reg & STATUS_PCIXSPD_MASK);
   2075 				sc->sc_bus_speed = 66;
   2076 				break;
   2077 			}
   2078 		} else
   2079 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2080 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2081 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2082 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2083 	}
   2084 
   2085 	/* Disable ASPM L0s and/or L1 for workaround */
   2086 	wm_disable_aspm(sc);
   2087 
   2088 	/* clear interesting stat counters */
   2089 	CSR_READ(sc, WMREG_COLC);
   2090 	CSR_READ(sc, WMREG_RXERRC);
   2091 
   2092 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2093 	    || (sc->sc_type >= WM_T_ICH8))
   2094 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2095 	if (sc->sc_type >= WM_T_ICH8)
   2096 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2097 
   2098 	/* Set PHY, NVM mutex related stuff */
   2099 	switch (sc->sc_type) {
   2100 	case WM_T_82542_2_0:
   2101 	case WM_T_82542_2_1:
   2102 	case WM_T_82543:
   2103 	case WM_T_82544:
   2104 		/* Microwire */
   2105 		sc->nvm.read = wm_nvm_read_uwire;
   2106 		sc->sc_nvm_wordsize = 64;
   2107 		sc->sc_nvm_addrbits = 6;
   2108 		break;
   2109 	case WM_T_82540:
   2110 	case WM_T_82545:
   2111 	case WM_T_82545_3:
   2112 	case WM_T_82546:
   2113 	case WM_T_82546_3:
   2114 		/* Microwire */
   2115 		sc->nvm.read = wm_nvm_read_uwire;
   2116 		reg = CSR_READ(sc, WMREG_EECD);
   2117 		if (reg & EECD_EE_SIZE) {
   2118 			sc->sc_nvm_wordsize = 256;
   2119 			sc->sc_nvm_addrbits = 8;
   2120 		} else {
   2121 			sc->sc_nvm_wordsize = 64;
   2122 			sc->sc_nvm_addrbits = 6;
   2123 		}
   2124 		sc->sc_flags |= WM_F_LOCK_EECD;
   2125 		sc->nvm.acquire = wm_get_eecd;
   2126 		sc->nvm.release = wm_put_eecd;
   2127 		break;
   2128 	case WM_T_82541:
   2129 	case WM_T_82541_2:
   2130 	case WM_T_82547:
   2131 	case WM_T_82547_2:
   2132 		reg = CSR_READ(sc, WMREG_EECD);
   2133 		/*
    2134 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2135 		 * 8254[17], so set the flags and functions before calling it.
   2136 		 */
   2137 		sc->sc_flags |= WM_F_LOCK_EECD;
   2138 		sc->nvm.acquire = wm_get_eecd;
   2139 		sc->nvm.release = wm_put_eecd;
   2140 		if (reg & EECD_EE_TYPE) {
   2141 			/* SPI */
   2142 			sc->nvm.read = wm_nvm_read_spi;
   2143 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2144 			wm_nvm_set_addrbits_size_eecd(sc);
   2145 		} else {
   2146 			/* Microwire */
   2147 			sc->nvm.read = wm_nvm_read_uwire;
   2148 			if ((reg & EECD_EE_ABITS) != 0) {
   2149 				sc->sc_nvm_wordsize = 256;
   2150 				sc->sc_nvm_addrbits = 8;
   2151 			} else {
   2152 				sc->sc_nvm_wordsize = 64;
   2153 				sc->sc_nvm_addrbits = 6;
   2154 			}
   2155 		}
   2156 		break;
   2157 	case WM_T_82571:
   2158 	case WM_T_82572:
   2159 		/* SPI */
   2160 		sc->nvm.read = wm_nvm_read_eerd;
    2161 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2162 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2163 		wm_nvm_set_addrbits_size_eecd(sc);
   2164 		sc->phy.acquire = wm_get_swsm_semaphore;
   2165 		sc->phy.release = wm_put_swsm_semaphore;
   2166 		sc->nvm.acquire = wm_get_nvm_82571;
   2167 		sc->nvm.release = wm_put_nvm_82571;
   2168 		break;
   2169 	case WM_T_82573:
   2170 	case WM_T_82574:
   2171 	case WM_T_82583:
   2172 		sc->nvm.read = wm_nvm_read_eerd;
    2173 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2174 		if (sc->sc_type == WM_T_82573) {
   2175 			sc->phy.acquire = wm_get_swsm_semaphore;
   2176 			sc->phy.release = wm_put_swsm_semaphore;
   2177 			sc->nvm.acquire = wm_get_nvm_82571;
   2178 			sc->nvm.release = wm_put_nvm_82571;
   2179 		} else {
   2180 			/* Both PHY and NVM use the same semaphore. */
   2181 			sc->phy.acquire = sc->nvm.acquire
   2182 			    = wm_get_swfwhw_semaphore;
   2183 			sc->phy.release = sc->nvm.release
   2184 			    = wm_put_swfwhw_semaphore;
   2185 		}
   2186 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2187 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2188 			sc->sc_nvm_wordsize = 2048;
   2189 		} else {
   2190 			/* SPI */
   2191 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2192 			wm_nvm_set_addrbits_size_eecd(sc);
   2193 		}
   2194 		break;
   2195 	case WM_T_82575:
   2196 	case WM_T_82576:
   2197 	case WM_T_82580:
   2198 	case WM_T_I350:
   2199 	case WM_T_I354:
   2200 	case WM_T_80003:
   2201 		/* SPI */
   2202 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2203 		wm_nvm_set_addrbits_size_eecd(sc);
    2204 		if ((sc->sc_type == WM_T_80003)
   2205 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2206 			sc->nvm.read = wm_nvm_read_eerd;
   2207 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2208 		} else {
   2209 			sc->nvm.read = wm_nvm_read_spi;
   2210 			sc->sc_flags |= WM_F_LOCK_EECD;
   2211 		}
   2212 		sc->phy.acquire = wm_get_phy_82575;
   2213 		sc->phy.release = wm_put_phy_82575;
   2214 		sc->nvm.acquire = wm_get_nvm_80003;
   2215 		sc->nvm.release = wm_put_nvm_80003;
   2216 		break;
   2217 	case WM_T_ICH8:
   2218 	case WM_T_ICH9:
   2219 	case WM_T_ICH10:
   2220 	case WM_T_PCH:
   2221 	case WM_T_PCH2:
   2222 	case WM_T_PCH_LPT:
   2223 		sc->nvm.read = wm_nvm_read_ich8;
   2224 		/* FLASH */
   2225 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2226 		sc->sc_nvm_wordsize = 2048;
   2227 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2228 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2229 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2230 			aprint_error_dev(sc->sc_dev,
   2231 			    "can't map FLASH registers\n");
   2232 			goto out;
   2233 		}
   2234 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2235 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2236 		    ICH_FLASH_SECTOR_SIZE;
   2237 		sc->sc_ich8_flash_bank_size =
   2238 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2239 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2240 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2241 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
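		/*
		 * Worked example (hypothetical GFPREG value, and assuming
		 * 4KB flash sectors): GFPREG = 0x00070001 gives a base of
		 * sector 1 (flash base 0x1000) and (7 + 1) - 1 = 7 sectors
		 * per bank, i.e. 28672 bytes, which the division above
		 * turns into 7168 words per bank.
		 */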
   2242 		sc->sc_flashreg_offset = 0;
   2243 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2244 		sc->phy.release = wm_put_swflag_ich8lan;
   2245 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2246 		sc->nvm.release = wm_put_nvm_ich8lan;
   2247 		break;
   2248 	case WM_T_PCH_SPT:
   2249 	case WM_T_PCH_CNP:
   2250 		sc->nvm.read = wm_nvm_read_spt;
   2251 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2252 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2253 		sc->sc_flasht = sc->sc_st;
   2254 		sc->sc_flashh = sc->sc_sh;
   2255 		sc->sc_ich8_flash_base = 0;
   2256 		sc->sc_nvm_wordsize =
   2257 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2258 			* NVM_SIZE_MULTIPLIER;
    2259 		/* That is the size in bytes; we want words */
   2260 		sc->sc_nvm_wordsize /= 2;
   2261 		/* assume 2 banks */
   2262 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
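		/*
		 * Worked example (hypothetical strap value, and assuming
		 * NVM_SIZE_MULTIPLIER is 4096): a strap field of 7 gives
		 * (7 + 1) * 4096 = 32768 bytes = 16384 words, so each of
		 * the two assumed banks holds 8192 words.
		 */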
   2263 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2264 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2265 		sc->phy.release = wm_put_swflag_ich8lan;
   2266 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2267 		sc->nvm.release = wm_put_nvm_ich8lan;
   2268 		break;
   2269 	case WM_T_I210:
   2270 	case WM_T_I211:
    2271 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2272 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2273 		if (wm_nvm_flash_presence_i210(sc)) {
   2274 			sc->nvm.read = wm_nvm_read_eerd;
   2275 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2276 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2277 			wm_nvm_set_addrbits_size_eecd(sc);
   2278 		} else {
   2279 			sc->nvm.read = wm_nvm_read_invm;
   2280 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2281 			sc->sc_nvm_wordsize = INVM_SIZE;
   2282 		}
   2283 		sc->phy.acquire = wm_get_phy_82575;
   2284 		sc->phy.release = wm_put_phy_82575;
   2285 		sc->nvm.acquire = wm_get_nvm_80003;
   2286 		sc->nvm.release = wm_put_nvm_80003;
   2287 		break;
   2288 	default:
   2289 		break;
   2290 	}
   2291 
   2292 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2293 	switch (sc->sc_type) {
   2294 	case WM_T_82571:
   2295 	case WM_T_82572:
   2296 		reg = CSR_READ(sc, WMREG_SWSM2);
   2297 		if ((reg & SWSM2_LOCK) == 0) {
   2298 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2299 			force_clear_smbi = true;
   2300 		} else
   2301 			force_clear_smbi = false;
   2302 		break;
   2303 	case WM_T_82573:
   2304 	case WM_T_82574:
   2305 	case WM_T_82583:
   2306 		force_clear_smbi = true;
   2307 		break;
   2308 	default:
   2309 		force_clear_smbi = false;
   2310 		break;
   2311 	}
   2312 	if (force_clear_smbi) {
   2313 		reg = CSR_READ(sc, WMREG_SWSM);
   2314 		if ((reg & SWSM_SMBI) != 0)
   2315 			aprint_error_dev(sc->sc_dev,
   2316 			    "Please update the Bootagent\n");
   2317 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2318 	}
   2319 
   2320 	/*
    2321 	 * Defer printing the EEPROM type until after verifying the checksum.
   2322 	 * This allows the EEPROM type to be printed correctly in the case
   2323 	 * that no EEPROM is attached.
   2324 	 */
   2325 	/*
   2326 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2327 	 * this for later, so we can fail future reads from the EEPROM.
   2328 	 */
   2329 	if (wm_nvm_validate_checksum(sc)) {
   2330 		/*
   2331 		 * Read twice again because some PCI-e parts fail the
   2332 		 * first check due to the link being in sleep state.
   2333 		 */
   2334 		if (wm_nvm_validate_checksum(sc))
   2335 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2336 	}
   2337 
   2338 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2339 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2340 	else {
   2341 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2342 		    sc->sc_nvm_wordsize);
   2343 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2344 			aprint_verbose("iNVM");
   2345 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2346 			aprint_verbose("FLASH(HW)");
   2347 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2348 			aprint_verbose("FLASH");
   2349 		else {
   2350 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2351 				eetype = "SPI";
   2352 			else
   2353 				eetype = "MicroWire";
   2354 			aprint_verbose("(%d address bits) %s EEPROM",
   2355 			    sc->sc_nvm_addrbits, eetype);
   2356 		}
   2357 	}
   2358 	wm_nvm_version(sc);
   2359 	aprint_verbose("\n");
   2360 
   2361 	/*
    2362 	 * XXX This is the first call of wm_gmii_setup_phytype; the result
    2363 	 * might be incorrect.
   2364 	 */
   2365 	wm_gmii_setup_phytype(sc, 0, 0);
   2366 
   2367 	/* Reset the chip to a known state. */
   2368 	wm_reset(sc);
   2369 
   2370 	/*
   2371 	 * Check for I21[01] PLL workaround.
   2372 	 *
   2373 	 * Three cases:
   2374 	 * a) Chip is I211.
   2375 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2376 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2377 	 */
   2378 	if (sc->sc_type == WM_T_I211)
   2379 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2380 	if (sc->sc_type == WM_T_I210) {
   2381 		if (!wm_nvm_flash_presence_i210(sc))
   2382 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2383 		else if ((sc->sc_nvm_ver_major < 3)
   2384 		    || ((sc->sc_nvm_ver_major == 3)
   2385 			&& (sc->sc_nvm_ver_minor < 25))) {
   2386 			aprint_verbose_dev(sc->sc_dev,
   2387 			    "ROM image version %d.%d is older than 3.25\n",
   2388 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2389 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2390 		}
   2391 	}
   2392 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2393 		wm_pll_workaround_i210(sc);
   2394 
   2395 	wm_get_wakeup(sc);
   2396 
   2397 	/* Non-AMT based hardware can now take control from firmware */
   2398 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2399 		wm_get_hw_control(sc);
   2400 
   2401 	/*
   2402 	 * Read the Ethernet address from the EEPROM, if not first found
   2403 	 * in device properties.
   2404 	 */
   2405 	ea = prop_dictionary_get(dict, "mac-address");
   2406 	if (ea != NULL) {
   2407 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2408 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2409 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2410 	} else {
   2411 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2412 			aprint_error_dev(sc->sc_dev,
   2413 			    "unable to read Ethernet address\n");
   2414 			goto out;
   2415 		}
   2416 	}
   2417 
   2418 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2419 	    ether_sprintf(enaddr));
   2420 
   2421 	/*
   2422 	 * Read the config info from the EEPROM, and set up various
   2423 	 * bits in the control registers based on their contents.
   2424 	 */
   2425 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2426 	if (pn != NULL) {
   2427 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2428 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2429 	} else {
   2430 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2431 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2432 			goto out;
   2433 		}
   2434 	}
   2435 
   2436 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2437 	if (pn != NULL) {
   2438 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2439 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2440 	} else {
   2441 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2442 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2443 			goto out;
   2444 		}
   2445 	}
   2446 
   2447 	/* check for WM_F_WOL */
   2448 	switch (sc->sc_type) {
   2449 	case WM_T_82542_2_0:
   2450 	case WM_T_82542_2_1:
   2451 	case WM_T_82543:
   2452 		/* dummy? */
   2453 		eeprom_data = 0;
   2454 		apme_mask = NVM_CFG3_APME;
   2455 		break;
   2456 	case WM_T_82544:
   2457 		apme_mask = NVM_CFG2_82544_APM_EN;
   2458 		eeprom_data = cfg2;
   2459 		break;
   2460 	case WM_T_82546:
   2461 	case WM_T_82546_3:
   2462 	case WM_T_82571:
   2463 	case WM_T_82572:
   2464 	case WM_T_82573:
   2465 	case WM_T_82574:
   2466 	case WM_T_82583:
   2467 	case WM_T_80003:
   2468 	default:
   2469 		apme_mask = NVM_CFG3_APME;
   2470 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2471 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2472 		break;
   2473 	case WM_T_82575:
   2474 	case WM_T_82576:
   2475 	case WM_T_82580:
   2476 	case WM_T_I350:
   2477 	case WM_T_I354: /* XXX ok? */
   2478 	case WM_T_ICH8:
   2479 	case WM_T_ICH9:
   2480 	case WM_T_ICH10:
   2481 	case WM_T_PCH:
   2482 	case WM_T_PCH2:
   2483 	case WM_T_PCH_LPT:
   2484 	case WM_T_PCH_SPT:
   2485 	case WM_T_PCH_CNP:
   2486 		/* XXX The funcid should be checked on some devices */
   2487 		apme_mask = WUC_APME;
   2488 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2489 		break;
   2490 	}
   2491 
   2492 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2493 	if ((eeprom_data & apme_mask) != 0)
   2494 		sc->sc_flags |= WM_F_WOL;
   2495 
   2496 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2497 		/* Check NVM for autonegotiation */
   2498 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2499 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2500 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2501 		}
   2502 	}
   2503 
   2504 	/*
   2505 	 * XXX need special handling for some multiple port cards
    2506 	 * to disable a particular port.
   2507 	 */
   2508 
   2509 	if (sc->sc_type >= WM_T_82544) {
   2510 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2511 		if (pn != NULL) {
   2512 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2513 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2514 		} else {
   2515 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2516 				aprint_error_dev(sc->sc_dev,
   2517 				    "unable to read SWDPIN\n");
   2518 				goto out;
   2519 			}
   2520 		}
   2521 	}
   2522 
   2523 	if (cfg1 & NVM_CFG1_ILOS)
   2524 		sc->sc_ctrl |= CTRL_ILOS;
   2525 
   2526 	/*
   2527 	 * XXX
    2528 	 * This code isn't correct because pins 2 and 3 are located at
    2529 	 * different positions on newer chips. Check all the datasheets.
    2530 	 *
    2531 	 * Until this problem is resolved, only handle chips up to the 82580.
   2532 	 */
   2533 	if (sc->sc_type <= WM_T_82580) {
   2534 		if (sc->sc_type >= WM_T_82544) {
   2535 			sc->sc_ctrl |=
   2536 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2537 			    CTRL_SWDPIO_SHIFT;
   2538 			sc->sc_ctrl |=
   2539 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2540 			    CTRL_SWDPINS_SHIFT;
   2541 		} else {
   2542 			sc->sc_ctrl |=
   2543 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2544 			    CTRL_SWDPIO_SHIFT;
   2545 		}
   2546 	}
   2547 
    2548 	/* XXX Should this also apply to chips other than the 82580? */
   2549 	if (sc->sc_type == WM_T_82580) {
   2550 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2551 		if (nvmword & __BIT(13))
   2552 			sc->sc_ctrl |= CTRL_ILOS;
   2553 	}
   2554 
   2555 #if 0
   2556 	if (sc->sc_type >= WM_T_82544) {
   2557 		if (cfg1 & NVM_CFG1_IPS0)
   2558 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2559 		if (cfg1 & NVM_CFG1_IPS1)
   2560 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2561 		sc->sc_ctrl_ext |=
   2562 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2563 		    CTRL_EXT_SWDPIO_SHIFT;
   2564 		sc->sc_ctrl_ext |=
   2565 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2566 		    CTRL_EXT_SWDPINS_SHIFT;
   2567 	} else {
   2568 		sc->sc_ctrl_ext |=
   2569 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2570 		    CTRL_EXT_SWDPIO_SHIFT;
   2571 	}
   2572 #endif
   2573 
   2574 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2575 #if 0
   2576 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2577 #endif
   2578 
   2579 	if (sc->sc_type == WM_T_PCH) {
   2580 		uint16_t val;
   2581 
   2582 		/* Save the NVM K1 bit setting */
   2583 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2584 
   2585 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2586 			sc->sc_nvm_k1_enabled = 1;
   2587 		else
   2588 			sc->sc_nvm_k1_enabled = 0;
   2589 	}
   2590 
    2591 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2592 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2593 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2594 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2595 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2596 	    || sc->sc_type == WM_T_82573
   2597 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2598 		/* Copper only */
   2599 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2600 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2601 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2602 	    || (sc->sc_type == WM_T_I211)) {
   2603 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2604 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2605 		switch (link_mode) {
   2606 		case CTRL_EXT_LINK_MODE_1000KX:
   2607 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2608 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2609 			break;
   2610 		case CTRL_EXT_LINK_MODE_SGMII:
   2611 			if (wm_sgmii_uses_mdio(sc)) {
   2612 				aprint_verbose_dev(sc->sc_dev,
   2613 				    "SGMII(MDIO)\n");
   2614 				sc->sc_flags |= WM_F_SGMII;
   2615 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2616 				break;
   2617 			}
   2618 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2619 			/*FALLTHROUGH*/
   2620 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2621 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2622 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2623 				if (link_mode
   2624 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2625 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2626 					sc->sc_flags |= WM_F_SGMII;
   2627 				} else {
   2628 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2629 					aprint_verbose_dev(sc->sc_dev,
   2630 					    "SERDES\n");
   2631 				}
   2632 				break;
   2633 			}
   2634 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2635 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2636 
   2637 			/* Change current link mode setting */
   2638 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2639 			switch (sc->sc_mediatype) {
   2640 			case WM_MEDIATYPE_COPPER:
   2641 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2642 				break;
   2643 			case WM_MEDIATYPE_SERDES:
   2644 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2645 				break;
   2646 			default:
   2647 				break;
   2648 			}
   2649 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2650 			break;
   2651 		case CTRL_EXT_LINK_MODE_GMII:
   2652 		default:
   2653 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2654 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2655 			break;
   2656 		}
   2657 
    2658 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2659 			reg |= CTRL_EXT_I2C_ENA;
    2660 		else
    2661 			reg &= ~CTRL_EXT_I2C_ENA;
   2663 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2664 	} else if (sc->sc_type < WM_T_82543 ||
   2665 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2666 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2667 			aprint_error_dev(sc->sc_dev,
   2668 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2669 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2670 		}
   2671 	} else {
   2672 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2673 			aprint_error_dev(sc->sc_dev,
   2674 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2675 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2676 		}
   2677 	}
   2678 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2679 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2680 
   2681 	/* Set device properties (macflags) */
   2682 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2683 
   2684 	/* Initialize the media structures accordingly. */
   2685 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2686 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2687 	else
   2688 		wm_tbi_mediainit(sc); /* All others */
   2689 
   2690 	ifp = &sc->sc_ethercom.ec_if;
   2691 	xname = device_xname(sc->sc_dev);
   2692 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2693 	ifp->if_softc = sc;
   2694 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2695 #ifdef WM_MPSAFE
   2696 	ifp->if_extflags = IFEF_MPSAFE;
   2697 #endif
   2698 	ifp->if_ioctl = wm_ioctl;
   2699 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2700 		ifp->if_start = wm_nq_start;
   2701 		/*
   2702 		 * When the number of CPUs is one and the controller can use
    2703 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2704 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2705 		 * the other for link status changes.
   2706 		 * In this situation, wm_nq_transmit() is disadvantageous
   2707 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2708 		 */
   2709 		if (wm_is_using_multiqueue(sc))
   2710 			ifp->if_transmit = wm_nq_transmit;
   2711 	} else {
   2712 		ifp->if_start = wm_start;
   2713 		/*
    2714 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2715 		 */
   2716 		if (wm_is_using_multiqueue(sc))
   2717 			ifp->if_transmit = wm_transmit;
   2718 	}
    2719 	/* wm(4) doesn't use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2720 	ifp->if_init = wm_init;
   2721 	ifp->if_stop = wm_stop;
   2722 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2723 	IFQ_SET_READY(&ifp->if_snd);
   2724 
   2725 	/* Check for jumbo frame */
   2726 	switch (sc->sc_type) {
   2727 	case WM_T_82573:
   2728 		/* XXX limited to 9234 if ASPM is disabled */
   2729 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2730 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2731 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2732 		break;
   2733 	case WM_T_82571:
   2734 	case WM_T_82572:
   2735 	case WM_T_82574:
   2736 	case WM_T_82583:
   2737 	case WM_T_82575:
   2738 	case WM_T_82576:
   2739 	case WM_T_82580:
   2740 	case WM_T_I350:
   2741 	case WM_T_I354:
   2742 	case WM_T_I210:
   2743 	case WM_T_I211:
   2744 	case WM_T_80003:
   2745 	case WM_T_ICH9:
   2746 	case WM_T_ICH10:
   2747 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2748 	case WM_T_PCH_LPT:
   2749 	case WM_T_PCH_SPT:
   2750 	case WM_T_PCH_CNP:
   2751 		/* XXX limited to 9234 */
   2752 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2753 		break;
   2754 	case WM_T_PCH:
   2755 		/* XXX limited to 4096 */
   2756 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2757 		break;
   2758 	case WM_T_82542_2_0:
   2759 	case WM_T_82542_2_1:
   2760 	case WM_T_ICH8:
2761 		/* No support for jumbo frames */
   2762 		break;
   2763 	default:
   2764 		/* ETHER_MAX_LEN_JUMBO */
   2765 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2766 		break;
   2767 	}
   2768 
2769 	/* If we're an i82543 or greater, we can support VLANs. */
   2770 	if (sc->sc_type >= WM_T_82543)
   2771 		sc->sc_ethercom.ec_capabilities |=
   2772 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2773 
   2774 	/*
2775 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2776 	 * on i82543 and later.
   2777 	 */
   2778 	if (sc->sc_type >= WM_T_82543) {
   2779 		ifp->if_capabilities |=
   2780 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2781 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2782 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2783 		    IFCAP_CSUM_TCPv6_Tx |
   2784 		    IFCAP_CSUM_UDPv6_Tx;
   2785 	}
   2786 
   2787 	/*
   2788 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2789 	 *
   2790 	 *	82541GI (8086:1076) ... no
   2791 	 *	82572EI (8086:10b9) ... yes
   2792 	 */
   2793 	if (sc->sc_type >= WM_T_82571) {
   2794 		ifp->if_capabilities |=
   2795 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2796 	}
   2797 
   2798 	/*
2799 	 * If we're an i82544 or greater (except i82547), we can do
   2800 	 * TCP segmentation offload.
   2801 	 */
   2802 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2803 		ifp->if_capabilities |= IFCAP_TSOv4;
   2804 	}
   2805 
   2806 	if (sc->sc_type >= WM_T_82571) {
   2807 		ifp->if_capabilities |= IFCAP_TSOv6;
   2808 	}
   2809 
   2810 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2811 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2812 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2813 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2814 
   2815 #ifdef WM_MPSAFE
   2816 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2817 #else
   2818 	sc->sc_core_lock = NULL;
   2819 #endif
   2820 
   2821 	/* Attach the interface. */
   2822 	error = if_initialize(ifp);
   2823 	if (error != 0) {
   2824 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2825 		    error);
   2826 		return; /* Error */
   2827 	}
   2828 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2829 	ether_ifattach(ifp, enaddr);
   2830 	if_register(ifp);
   2831 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2832 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2833 			  RND_FLAG_DEFAULT);
   2834 
   2835 #ifdef WM_EVENT_COUNTERS
   2836 	/* Attach event counters. */
   2837 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2838 	    NULL, xname, "linkintr");
   2839 
   2840 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2841 	    NULL, xname, "tx_xoff");
   2842 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2843 	    NULL, xname, "tx_xon");
   2844 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2845 	    NULL, xname, "rx_xoff");
   2846 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2847 	    NULL, xname, "rx_xon");
   2848 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2849 	    NULL, xname, "rx_macctl");
   2850 #endif /* WM_EVENT_COUNTERS */
   2851 
   2852 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2853 		pmf_class_network_register(self, ifp);
   2854 	else
   2855 		aprint_error_dev(self, "couldn't establish power handler\n");
   2856 
   2857 	sc->sc_flags |= WM_F_ATTACHED;
   2858  out:
   2859 	return;
   2860 }
   2861 
   2862 /* The detach function (ca_detach) */
   2863 static int
   2864 wm_detach(device_t self, int flags __unused)
   2865 {
   2866 	struct wm_softc *sc = device_private(self);
   2867 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2868 	int i;
   2869 
   2870 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2871 		return 0;
   2872 
2873 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   2874 	wm_stop(ifp, 1);
   2875 
   2876 	pmf_device_deregister(self);
   2877 
   2878 #ifdef WM_EVENT_COUNTERS
   2879 	evcnt_detach(&sc->sc_ev_linkintr);
   2880 
   2881 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2882 	evcnt_detach(&sc->sc_ev_tx_xon);
   2883 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2884 	evcnt_detach(&sc->sc_ev_rx_xon);
   2885 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2886 #endif /* WM_EVENT_COUNTERS */
   2887 
   2888 	/* Tell the firmware about the release */
   2889 	WM_CORE_LOCK(sc);
   2890 	wm_release_manageability(sc);
   2891 	wm_release_hw_control(sc);
   2892 	wm_enable_wakeup(sc);
   2893 	WM_CORE_UNLOCK(sc);
   2894 
   2895 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2896 
   2897 	/* Delete all remaining media. */
   2898 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2899 
   2900 	ether_ifdetach(ifp);
   2901 	if_detach(ifp);
   2902 	if_percpuq_destroy(sc->sc_ipq);
   2903 
   2904 	/* Unload RX dmamaps and free mbufs */
   2905 	for (i = 0; i < sc->sc_nqueues; i++) {
   2906 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2907 		mutex_enter(rxq->rxq_lock);
   2908 		wm_rxdrain(rxq);
   2909 		mutex_exit(rxq->rxq_lock);
   2910 	}
   2911 	/* Must unlock here */
   2912 
   2913 	/* Disestablish the interrupt handler */
   2914 	for (i = 0; i < sc->sc_nintrs; i++) {
   2915 		if (sc->sc_ihs[i] != NULL) {
   2916 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2917 			sc->sc_ihs[i] = NULL;
   2918 		}
   2919 	}
   2920 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2921 
   2922 	wm_free_txrx_queues(sc);
   2923 
   2924 	/* Unmap the registers */
   2925 	if (sc->sc_ss) {
   2926 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2927 		sc->sc_ss = 0;
   2928 	}
   2929 	if (sc->sc_ios) {
   2930 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2931 		sc->sc_ios = 0;
   2932 	}
   2933 	if (sc->sc_flashs) {
   2934 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2935 		sc->sc_flashs = 0;
   2936 	}
   2937 
   2938 	if (sc->sc_core_lock)
   2939 		mutex_obj_free(sc->sc_core_lock);
   2940 	if (sc->sc_ich_phymtx)
   2941 		mutex_obj_free(sc->sc_ich_phymtx);
   2942 	if (sc->sc_ich_nvmmtx)
   2943 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2944 
   2945 	return 0;
   2946 }
   2947 
   2948 static bool
   2949 wm_suspend(device_t self, const pmf_qual_t *qual)
   2950 {
   2951 	struct wm_softc *sc = device_private(self);
   2952 
   2953 	wm_release_manageability(sc);
   2954 	wm_release_hw_control(sc);
   2955 	wm_enable_wakeup(sc);
   2956 
   2957 	return true;
   2958 }
   2959 
   2960 static bool
   2961 wm_resume(device_t self, const pmf_qual_t *qual)
   2962 {
   2963 	struct wm_softc *sc = device_private(self);
   2964 
   2965 	/* Disable ASPM L0s and/or L1 for workaround */
   2966 	wm_disable_aspm(sc);
   2967 	wm_init_manageability(sc);
   2968 
   2969 	return true;
   2970 }
   2971 
   2972 /*
   2973  * wm_watchdog:		[ifnet interface function]
   2974  *
   2975  *	Watchdog timer handler.
   2976  */
   2977 static void
   2978 wm_watchdog(struct ifnet *ifp)
   2979 {
   2980 	int qid;
   2981 	struct wm_softc *sc = ifp->if_softc;
2982 	uint16_t hang_queue = 0; /* The maximum queue count in wm(4) is 16 (82576). */
   2983 
   2984 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2985 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2986 
   2987 		wm_watchdog_txq(ifp, txq, &hang_queue);
   2988 	}
   2989 
   2990 	/*
2991 	 * If any of the queues hung up, reset the interface.
   2992 	 */
   2993 	if (hang_queue != 0) {
   2994 		(void) wm_init(ifp);
   2995 
   2996 		/*
2997 		 * Some upper-layer processing still calls ifp->if_start(),
2998 		 * e.g. ALTQ or a single-CPU system.
   2999 		 */
   3000 		/* Try to get more packets going. */
   3001 		ifp->if_start(ifp);
   3002 	}
   3003 }
   3004 
   3005 
   3006 static void
   3007 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3008 {
   3009 
   3010 	mutex_enter(txq->txq_lock);
   3011 	if (txq->txq_watchdog &&
   3012 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3013 		wm_watchdog_txq_locked(ifp, txq, hang);
   3014 	}
   3015 	mutex_exit(txq->txq_lock);
   3016 }
   3017 
   3018 static void
   3019 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3020 {
   3021 	struct wm_softc *sc = ifp->if_softc;
   3022 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3023 
   3024 	KASSERT(mutex_owned(txq->txq_lock));
   3025 
   3026 	/*
   3027 	 * Since we're using delayed interrupts, sweep up
   3028 	 * before we report an error.
   3029 	 */
   3030 	wm_txeof(txq, UINT_MAX);
   3031 	if (txq->txq_watchdog)
   3032 		*hang |= __BIT(wmq->wmq_id);
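	/*
	 * Illustrative note: wmq_id selects the per-queue bit, so if e.g.
	 * queues 0 and 3 hang, the caller's hang_queue mask becomes 0x0009
	 * and wm_watchdog() resets the interface once for all hung queues.
	 */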
   3033 
   3034 	if (txq->txq_free != WM_NTXDESC(txq)) {
   3035 #ifdef WM_DEBUG
   3036 		int i, j;
   3037 		struct wm_txsoft *txs;
   3038 #endif
   3039 		log(LOG_ERR,
   3040 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3041 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3042 		    txq->txq_next);
   3043 		ifp->if_oerrors++;
   3044 #ifdef WM_DEBUG
3045 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3046 		    i = WM_NEXTTXS(txq, i)) {
   3047 		    txs = &txq->txq_soft[i];
   3048 		    printf("txs %d tx %d -> %d\n",
   3049 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3050 		    for (j = txs->txs_firstdesc; ;
   3051 			j = WM_NEXTTX(txq, j)) {
   3052 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3053 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3054 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3055 				    printf("\t %#08x%08x\n",
   3056 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3057 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3058 			    } else {
   3059 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3060 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3061 					txq->txq_descs[j].wtx_addr.wa_low);
   3062 				    printf("\t %#04x%02x%02x%08x\n",
   3063 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3064 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3065 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3066 					txq->txq_descs[j].wtx_cmdlen);
   3067 			    }
   3068 			if (j == txs->txs_lastdesc)
   3069 				break;
   3070 			}
   3071 		}
   3072 #endif
   3073 	}
   3074 }
   3075 
   3076 /*
   3077  * wm_tick:
   3078  *
   3079  *	One second timer, used to check link status, sweep up
   3080  *	completed transmit jobs, etc.
   3081  */
   3082 static void
   3083 wm_tick(void *arg)
   3084 {
   3085 	struct wm_softc *sc = arg;
   3086 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3087 #ifndef WM_MPSAFE
   3088 	int s = splnet();
   3089 #endif
   3090 
   3091 	WM_CORE_LOCK(sc);
   3092 
   3093 	if (sc->sc_core_stopping) {
   3094 		WM_CORE_UNLOCK(sc);
   3095 #ifndef WM_MPSAFE
   3096 		splx(s);
   3097 #endif
   3098 		return;
   3099 	}
   3100 
   3101 	if (sc->sc_type >= WM_T_82542_2_1) {
   3102 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3103 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3104 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3105 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3106 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3107 	}
   3108 
   3109 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3110 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3111 	    + CSR_READ(sc, WMREG_CRCERRS)
   3112 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3113 	    + CSR_READ(sc, WMREG_SYMERRC)
   3114 	    + CSR_READ(sc, WMREG_RXERRC)
   3115 	    + CSR_READ(sc, WMREG_SEC)
   3116 	    + CSR_READ(sc, WMREG_CEXTERR)
   3117 	    + CSR_READ(sc, WMREG_RLEC);
   3118 	/*
3119 	 * WMREG_RNBC is incremented when there are no available buffers in
3120 	 * host memory. It is not a count of dropped packets, because the
3121 	 * ethernet controller can still receive packets in that case as
3122 	 * long as there is space in the PHY's FIFO.
3123 	 *
3124 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT of your
3125 	 * own instead of if_iqdrops.
   3126 	 */
   3127 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3128 
   3129 	if (sc->sc_flags & WM_F_HAS_MII)
   3130 		mii_tick(&sc->sc_mii);
   3131 	else if ((sc->sc_type >= WM_T_82575)
   3132 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3133 		wm_serdes_tick(sc);
   3134 	else
   3135 		wm_tbi_tick(sc);
   3136 
   3137 	WM_CORE_UNLOCK(sc);
   3138 
   3139 	wm_watchdog(ifp);
   3140 
   3141 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3142 }
   3143 
   3144 static int
   3145 wm_ifflags_cb(struct ethercom *ec)
   3146 {
   3147 	struct ifnet *ifp = &ec->ec_if;
   3148 	struct wm_softc *sc = ifp->if_softc;
   3149 	int rc = 0;
   3150 
   3151 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3152 		device_xname(sc->sc_dev), __func__));
   3153 
   3154 	WM_CORE_LOCK(sc);
   3155 
   3156 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3157 	sc->sc_if_flags = ifp->if_flags;
   3158 
   3159 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3160 		rc = ENETRESET;
   3161 		goto out;
   3162 	}
   3163 
   3164 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3165 		wm_set_filter(sc);
   3166 
   3167 	wm_set_vlan(sc);
   3168 
   3169 out:
   3170 	WM_CORE_UNLOCK(sc);
   3171 
   3172 	return rc;
   3173 }
   3174 
   3175 /*
   3176  * wm_ioctl:		[ifnet interface function]
   3177  *
   3178  *	Handle control requests from the operator.
   3179  */
   3180 static int
   3181 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3182 {
   3183 	struct wm_softc *sc = ifp->if_softc;
   3184 	struct ifreq *ifr = (struct ifreq *) data;
   3185 	struct ifaddr *ifa = (struct ifaddr *)data;
   3186 	struct sockaddr_dl *sdl;
   3187 	int s, error;
   3188 
   3189 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3190 		device_xname(sc->sc_dev), __func__));
   3191 
   3192 #ifndef WM_MPSAFE
   3193 	s = splnet();
   3194 #endif
   3195 	switch (cmd) {
   3196 	case SIOCSIFMEDIA:
   3197 	case SIOCGIFMEDIA:
   3198 		WM_CORE_LOCK(sc);
   3199 		/* Flow control requires full-duplex mode. */
   3200 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3201 		    (ifr->ifr_media & IFM_FDX) == 0)
   3202 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3203 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3204 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3205 				/* We can do both TXPAUSE and RXPAUSE. */
   3206 				ifr->ifr_media |=
   3207 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3208 			}
   3209 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3210 		}
   3211 		WM_CORE_UNLOCK(sc);
   3212 #ifdef WM_MPSAFE
   3213 		s = splnet();
   3214 #endif
   3215 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3216 #ifdef WM_MPSAFE
   3217 		splx(s);
   3218 #endif
   3219 		break;
   3220 	case SIOCINITIFADDR:
   3221 		WM_CORE_LOCK(sc);
   3222 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3223 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3224 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3225 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3226 			/* unicast address is first multicast entry */
   3227 			wm_set_filter(sc);
   3228 			error = 0;
   3229 			WM_CORE_UNLOCK(sc);
   3230 			break;
   3231 		}
   3232 		WM_CORE_UNLOCK(sc);
   3233 		/*FALLTHROUGH*/
   3234 	default:
   3235 #ifdef WM_MPSAFE
   3236 		s = splnet();
   3237 #endif
3238 		/* ether_ioctl() may call wm_start(), so it must be called unlocked */
   3239 		error = ether_ioctl(ifp, cmd, data);
   3240 #ifdef WM_MPSAFE
   3241 		splx(s);
   3242 #endif
   3243 		if (error != ENETRESET)
   3244 			break;
   3245 
   3246 		error = 0;
   3247 
   3248 		if (cmd == SIOCSIFCAP) {
   3249 			error = (*ifp->if_init)(ifp);
   3250 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3251 			;
   3252 		else if (ifp->if_flags & IFF_RUNNING) {
   3253 			/*
   3254 			 * Multicast list has changed; set the hardware filter
   3255 			 * accordingly.
   3256 			 */
   3257 			WM_CORE_LOCK(sc);
   3258 			wm_set_filter(sc);
   3259 			WM_CORE_UNLOCK(sc);
   3260 		}
   3261 		break;
   3262 	}
   3263 
   3264 #ifndef WM_MPSAFE
   3265 	splx(s);
   3266 #endif
   3267 	return error;
   3268 }
   3269 
   3270 /* MAC address related */
   3271 
   3272 /*
3273  * Get the offset of the MAC address and return it.
3274  * If an error occurred, offset 0 is used.
   3275  */
   3276 static uint16_t
   3277 wm_check_alt_mac_addr(struct wm_softc *sc)
   3278 {
   3279 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3280 	uint16_t offset = NVM_OFF_MACADDR;
   3281 
   3282 	/* Try to read alternative MAC address pointer */
   3283 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3284 		return 0;
   3285 
3286 	/* Check whether the pointer is valid. */
   3287 	if ((offset == 0x0000) || (offset == 0xffff))
   3288 		return 0;
   3289 
   3290 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3291 	/*
3292 	 * Check whether the alternative MAC address is valid. Some cards
3293 	 * have a non-0xffff pointer but don't actually use an alternative
3294 	 * MAC address.
3295 	 *
3296 	 * The test: the group (I/G) bit of the first octet must be clear.
   3297 	 */
   3298 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3299 		if (((myea[0] & 0xff) & 0x01) == 0)
   3300 			return offset; /* Found */
   3301 
   3302 	/* Not found */
   3303 	return 0;
   3304 }
   3305 
   3306 static int
   3307 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3308 {
   3309 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3310 	uint16_t offset = NVM_OFF_MACADDR;
   3311 	int do_invert = 0;
   3312 
   3313 	switch (sc->sc_type) {
   3314 	case WM_T_82580:
   3315 	case WM_T_I350:
   3316 	case WM_T_I354:
   3317 		/* EEPROM Top Level Partitioning */
   3318 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3319 		break;
   3320 	case WM_T_82571:
   3321 	case WM_T_82575:
   3322 	case WM_T_82576:
   3323 	case WM_T_80003:
   3324 	case WM_T_I210:
   3325 	case WM_T_I211:
   3326 		offset = wm_check_alt_mac_addr(sc);
   3327 		if (offset == 0)
   3328 			if ((sc->sc_funcid & 0x01) == 1)
   3329 				do_invert = 1;
   3330 		break;
   3331 	default:
   3332 		if ((sc->sc_funcid & 0x01) == 1)
   3333 			do_invert = 1;
   3334 		break;
   3335 	}
   3336 
   3337 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3338 		goto bad;
   3339 
   3340 	enaddr[0] = myea[0] & 0xff;
   3341 	enaddr[1] = myea[0] >> 8;
   3342 	enaddr[2] = myea[1] & 0xff;
   3343 	enaddr[3] = myea[1] >> 8;
   3344 	enaddr[4] = myea[2] & 0xff;
   3345 	enaddr[5] = myea[2] >> 8;
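	/*
	 * Illustrative example: NVM words are stored LSB first, so
	 * myea[] = { 0x3400, 0x7856, 0xbc9a } unpacks to the station
	 * address 00:34:56:78:9a:bc.
	 */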
   3346 
   3347 	/*
   3348 	 * Toggle the LSB of the MAC address on the second port
   3349 	 * of some dual port cards.
   3350 	 */
   3351 	if (do_invert != 0)
   3352 		enaddr[5] ^= 1;
   3353 
   3354 	return 0;
   3355 
   3356  bad:
   3357 	return -1;
   3358 }
   3359 
   3360 /*
   3361  * wm_set_ral:
   3362  *
3363  *	Set an entry in the receive address list.
   3364  */
   3365 static void
   3366 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3367 {
   3368 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3369 	uint32_t wlock_mac;
   3370 	int rv;
   3371 
   3372 	if (enaddr != NULL) {
   3373 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3374 		    (enaddr[3] << 24);
   3375 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3376 		ral_hi |= RAL_AV;
   3377 	} else {
   3378 		ral_lo = 0;
   3379 		ral_hi = 0;
   3380 	}
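	/*
	 * Illustrative example: enaddr 00:11:22:33:44:55 packs to
	 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV, i.e. the
	 * address is stored LSB first with the Address Valid bit in RAH.
	 */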
   3381 
   3382 	switch (sc->sc_type) {
   3383 	case WM_T_82542_2_0:
   3384 	case WM_T_82542_2_1:
   3385 	case WM_T_82543:
   3386 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3387 		CSR_WRITE_FLUSH(sc);
   3388 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3389 		CSR_WRITE_FLUSH(sc);
   3390 		break;
   3391 	case WM_T_PCH2:
   3392 	case WM_T_PCH_LPT:
   3393 	case WM_T_PCH_SPT:
   3394 	case WM_T_PCH_CNP:
   3395 		if (idx == 0) {
   3396 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3397 			CSR_WRITE_FLUSH(sc);
   3398 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3399 			CSR_WRITE_FLUSH(sc);
   3400 			return;
   3401 		}
   3402 		if (sc->sc_type != WM_T_PCH2) {
   3403 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3404 			    FWSM_WLOCK_MAC);
   3405 			addrl = WMREG_SHRAL(idx - 1);
   3406 			addrh = WMREG_SHRAH(idx - 1);
   3407 		} else {
   3408 			wlock_mac = 0;
   3409 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3410 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3411 		}
   3412 
   3413 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3414 			rv = wm_get_swflag_ich8lan(sc);
   3415 			if (rv != 0)
   3416 				return;
   3417 			CSR_WRITE(sc, addrl, ral_lo);
   3418 			CSR_WRITE_FLUSH(sc);
   3419 			CSR_WRITE(sc, addrh, ral_hi);
   3420 			CSR_WRITE_FLUSH(sc);
   3421 			wm_put_swflag_ich8lan(sc);
   3422 		}
   3423 
   3424 		break;
   3425 	default:
   3426 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3427 		CSR_WRITE_FLUSH(sc);
   3428 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3429 		CSR_WRITE_FLUSH(sc);
   3430 		break;
   3431 	}
   3432 }
   3433 
   3434 /*
   3435  * wm_mchash:
   3436  *
   3437  *	Compute the hash of the multicast address for the 4096-bit
   3438  *	multicast filter.
   3439  */
   3440 static uint32_t
   3441 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3442 {
   3443 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3444 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3445 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3446 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3447 	uint32_t hash;
   3448 
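	/*
	 * Worked example: for the multicast address 01:00:5e:00:00:01 with
	 * sc_mchash_type 0 on a non-ICH/PCH chip, hash = (0x00 >> 4) |
	 * (0x01 << 4) = 0x010, which wm_set_filter() turns into MTA
	 * register 0 (hash >> 5) and bit 16 (hash & 0x1f).
	 */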
   3449 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3450 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3451 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3452 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   3453 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3454 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3455 		return (hash & 0x3ff);
   3456 	}
   3457 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3458 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3459 
   3460 	return (hash & 0xfff);
   3461 }
   3462 
   3463 /*
   3464  * wm_set_filter:
   3465  *
   3466  *	Set up the receive filter.
   3467  */
   3468 static void
   3469 wm_set_filter(struct wm_softc *sc)
   3470 {
   3471 	struct ethercom *ec = &sc->sc_ethercom;
   3472 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3473 	struct ether_multi *enm;
   3474 	struct ether_multistep step;
   3475 	bus_addr_t mta_reg;
   3476 	uint32_t hash, reg, bit;
   3477 	int i, size, ralmax;
   3478 
   3479 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3480 		device_xname(sc->sc_dev), __func__));
   3481 
   3482 	if (sc->sc_type >= WM_T_82544)
   3483 		mta_reg = WMREG_CORDOVA_MTA;
   3484 	else
   3485 		mta_reg = WMREG_MTA;
   3486 
   3487 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3488 
   3489 	if (ifp->if_flags & IFF_BROADCAST)
   3490 		sc->sc_rctl |= RCTL_BAM;
   3491 	if (ifp->if_flags & IFF_PROMISC) {
   3492 		sc->sc_rctl |= RCTL_UPE;
   3493 		goto allmulti;
   3494 	}
   3495 
   3496 	/*
   3497 	 * Set the station address in the first RAL slot, and
   3498 	 * clear the remaining slots.
   3499 	 */
   3500 	if (sc->sc_type == WM_T_ICH8)
3501 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3502 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3503 	    || (sc->sc_type == WM_T_PCH))
   3504 		size = WM_RAL_TABSIZE_ICH8;
   3505 	else if (sc->sc_type == WM_T_PCH2)
   3506 		size = WM_RAL_TABSIZE_PCH2;
   3507 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3508 	    || (sc->sc_type == WM_T_PCH_CNP))
   3509 		size = WM_RAL_TABSIZE_PCH_LPT;
   3510 	else if (sc->sc_type == WM_T_82575)
   3511 		size = WM_RAL_TABSIZE_82575;
   3512 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3513 		size = WM_RAL_TABSIZE_82576;
   3514 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3515 		size = WM_RAL_TABSIZE_I350;
   3516 	else
   3517 		size = WM_RAL_TABSIZE;
   3518 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3519 
   3520 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3521 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3522 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3523 		switch (i) {
   3524 		case 0:
   3525 			/* We can use all entries */
   3526 			ralmax = size;
   3527 			break;
   3528 		case 1:
   3529 			/* Only RAR[0] */
   3530 			ralmax = 1;
   3531 			break;
   3532 		default:
   3533 			/* available SHRA + RAR[0] */
   3534 			ralmax = i + 1;
   3535 		}
   3536 	} else
   3537 		ralmax = size;
   3538 	for (i = 1; i < size; i++) {
   3539 		if (i < ralmax)
   3540 			wm_set_ral(sc, NULL, i);
   3541 	}
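	/*
	 * Illustrative example: with FWSM_WLOCK_MAC == 3 on a PCH_LPT class
	 * chip, ralmax becomes 4, so entries 1..3 are cleared above while
	 * the firmware-locked entries 4..size-1 are left untouched.
	 */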
   3542 
   3543 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3544 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3545 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3546 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3547 		size = WM_ICH8_MC_TABSIZE;
   3548 	else
   3549 		size = WM_MC_TABSIZE;
   3550 	/* Clear out the multicast table. */
   3551 	for (i = 0; i < size; i++) {
   3552 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3553 		CSR_WRITE_FLUSH(sc);
   3554 	}
   3555 
   3556 	ETHER_LOCK(ec);
   3557 	ETHER_FIRST_MULTI(step, ec, enm);
   3558 	while (enm != NULL) {
   3559 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3560 			ETHER_UNLOCK(ec);
   3561 			/*
   3562 			 * We must listen to a range of multicast addresses.
   3563 			 * For now, just accept all multicasts, rather than
   3564 			 * trying to set only those filter bits needed to match
   3565 			 * the range.  (At this time, the only use of address
   3566 			 * ranges is for IP multicast routing, for which the
   3567 			 * range is big enough to require all bits set.)
   3568 			 */
   3569 			goto allmulti;
   3570 		}
   3571 
   3572 		hash = wm_mchash(sc, enm->enm_addrlo);
   3573 
   3574 		reg = (hash >> 5);
   3575 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3576 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3577 		    || (sc->sc_type == WM_T_PCH2)
   3578 		    || (sc->sc_type == WM_T_PCH_LPT)
   3579 		    || (sc->sc_type == WM_T_PCH_SPT)
   3580 		    || (sc->sc_type == WM_T_PCH_CNP))
   3581 			reg &= 0x1f;
   3582 		else
   3583 			reg &= 0x7f;
   3584 		bit = hash & 0x1f;
   3585 
   3586 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3587 		hash |= 1U << bit;
   3588 
   3589 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3590 			/*
3591 			 * 82544 Errata 9: Certain registers cannot be written
3592 			 * with particular alignments in PCI-X bus operation
3593 			 * (FCAH, MTA and VFTA).
   3594 			 */
   3595 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3596 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3597 			CSR_WRITE_FLUSH(sc);
   3598 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3599 			CSR_WRITE_FLUSH(sc);
   3600 		} else {
   3601 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3602 			CSR_WRITE_FLUSH(sc);
   3603 		}
   3604 
   3605 		ETHER_NEXT_MULTI(step, enm);
   3606 	}
   3607 	ETHER_UNLOCK(ec);
   3608 
   3609 	ifp->if_flags &= ~IFF_ALLMULTI;
   3610 	goto setit;
   3611 
   3612  allmulti:
   3613 	ifp->if_flags |= IFF_ALLMULTI;
   3614 	sc->sc_rctl |= RCTL_MPE;
   3615 
   3616  setit:
   3617 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3618 }
   3619 
   3620 /* Reset and init related */
   3621 
   3622 static void
   3623 wm_set_vlan(struct wm_softc *sc)
   3624 {
   3625 
   3626 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3627 		device_xname(sc->sc_dev), __func__));
   3628 
   3629 	/* Deal with VLAN enables. */
   3630 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3631 		sc->sc_ctrl |= CTRL_VME;
   3632 	else
   3633 		sc->sc_ctrl &= ~CTRL_VME;
   3634 
   3635 	/* Write the control registers. */
   3636 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3637 }
   3638 
   3639 static void
   3640 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3641 {
   3642 	uint32_t gcr;
   3643 	pcireg_t ctrl2;
   3644 
   3645 	gcr = CSR_READ(sc, WMREG_GCR);
   3646 
   3647 	/* Only take action if timeout value is defaulted to 0 */
   3648 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3649 		goto out;
   3650 
   3651 	if ((gcr & GCR_CAP_VER2) == 0) {
   3652 		gcr |= GCR_CMPL_TMOUT_10MS;
   3653 		goto out;
   3654 	}
   3655 
   3656 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3657 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3658 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3659 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3660 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3661 
   3662 out:
   3663 	/* Disable completion timeout resend */
   3664 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3665 
   3666 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3667 }
   3668 
   3669 void
   3670 wm_get_auto_rd_done(struct wm_softc *sc)
   3671 {
   3672 	int i;
   3673 
3674 	/* Wait for eeprom to reload */
   3675 	switch (sc->sc_type) {
   3676 	case WM_T_82571:
   3677 	case WM_T_82572:
   3678 	case WM_T_82573:
   3679 	case WM_T_82574:
   3680 	case WM_T_82583:
   3681 	case WM_T_82575:
   3682 	case WM_T_82576:
   3683 	case WM_T_82580:
   3684 	case WM_T_I350:
   3685 	case WM_T_I354:
   3686 	case WM_T_I210:
   3687 	case WM_T_I211:
   3688 	case WM_T_80003:
   3689 	case WM_T_ICH8:
   3690 	case WM_T_ICH9:
   3691 		for (i = 0; i < 10; i++) {
   3692 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3693 				break;
   3694 			delay(1000);
   3695 		}
   3696 		if (i == 10) {
   3697 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3698 			    "complete\n", device_xname(sc->sc_dev));
   3699 		}
   3700 		break;
   3701 	default:
   3702 		break;
   3703 	}
   3704 }
   3705 
   3706 void
   3707 wm_lan_init_done(struct wm_softc *sc)
   3708 {
   3709 	uint32_t reg = 0;
   3710 	int i;
   3711 
   3712 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3713 		device_xname(sc->sc_dev), __func__));
   3714 
   3715 	/* Wait for eeprom to reload */
   3716 	switch (sc->sc_type) {
   3717 	case WM_T_ICH10:
   3718 	case WM_T_PCH:
   3719 	case WM_T_PCH2:
   3720 	case WM_T_PCH_LPT:
   3721 	case WM_T_PCH_SPT:
   3722 	case WM_T_PCH_CNP:
   3723 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3724 			reg = CSR_READ(sc, WMREG_STATUS);
   3725 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3726 				break;
   3727 			delay(100);
   3728 		}
   3729 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3730 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3731 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3732 		}
   3733 		break;
   3734 	default:
   3735 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3736 		    __func__);
   3737 		break;
   3738 	}
   3739 
   3740 	reg &= ~STATUS_LAN_INIT_DONE;
   3741 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3742 }
   3743 
   3744 void
   3745 wm_get_cfg_done(struct wm_softc *sc)
   3746 {
   3747 	int mask;
   3748 	uint32_t reg;
   3749 	int i;
   3750 
   3751 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3752 		device_xname(sc->sc_dev), __func__));
   3753 
   3754 	/* Wait for eeprom to reload */
   3755 	switch (sc->sc_type) {
   3756 	case WM_T_82542_2_0:
   3757 	case WM_T_82542_2_1:
   3758 		/* null */
   3759 		break;
   3760 	case WM_T_82543:
   3761 	case WM_T_82544:
   3762 	case WM_T_82540:
   3763 	case WM_T_82545:
   3764 	case WM_T_82545_3:
   3765 	case WM_T_82546:
   3766 	case WM_T_82546_3:
   3767 	case WM_T_82541:
   3768 	case WM_T_82541_2:
   3769 	case WM_T_82547:
   3770 	case WM_T_82547_2:
   3771 	case WM_T_82573:
   3772 	case WM_T_82574:
   3773 	case WM_T_82583:
   3774 		/* generic */
   3775 		delay(10*1000);
   3776 		break;
   3777 	case WM_T_80003:
   3778 	case WM_T_82571:
   3779 	case WM_T_82572:
   3780 	case WM_T_82575:
   3781 	case WM_T_82576:
   3782 	case WM_T_82580:
   3783 	case WM_T_I350:
   3784 	case WM_T_I354:
   3785 	case WM_T_I210:
   3786 	case WM_T_I211:
   3787 		if (sc->sc_type == WM_T_82571) {
   3788 			/* Only 82571 shares port 0 */
   3789 			mask = EEMNGCTL_CFGDONE_0;
   3790 		} else
   3791 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3792 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3793 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3794 				break;
   3795 			delay(1000);
   3796 		}
   3797 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3798 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3799 				device_xname(sc->sc_dev), __func__));
   3800 		}
   3801 		break;
   3802 	case WM_T_ICH8:
   3803 	case WM_T_ICH9:
   3804 	case WM_T_ICH10:
   3805 	case WM_T_PCH:
   3806 	case WM_T_PCH2:
   3807 	case WM_T_PCH_LPT:
   3808 	case WM_T_PCH_SPT:
   3809 	case WM_T_PCH_CNP:
   3810 		delay(10*1000);
   3811 		if (sc->sc_type >= WM_T_ICH10)
   3812 			wm_lan_init_done(sc);
   3813 		else
   3814 			wm_get_auto_rd_done(sc);
   3815 
   3816 		reg = CSR_READ(sc, WMREG_STATUS);
   3817 		if ((reg & STATUS_PHYRA) != 0)
   3818 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3819 		break;
   3820 	default:
   3821 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3822 		    __func__);
   3823 		break;
   3824 	}
   3825 }
   3826 
   3827 void
   3828 wm_phy_post_reset(struct wm_softc *sc)
   3829 {
   3830 	uint32_t reg;
   3831 
   3832 	/* This function is only for ICH8 and newer. */
   3833 	if (sc->sc_type < WM_T_ICH8)
   3834 		return;
   3835 
   3836 	if (wm_phy_resetisblocked(sc)) {
   3837 		/* XXX */
   3838 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3839 		return;
   3840 	}
   3841 
   3842 	/* Allow time for h/w to get to quiescent state after reset */
   3843 	delay(10*1000);
   3844 
   3845 	/* Perform any necessary post-reset workarounds */
   3846 	if (sc->sc_type == WM_T_PCH)
   3847 		wm_hv_phy_workaround_ich8lan(sc);
   3848 	if (sc->sc_type == WM_T_PCH2)
   3849 		wm_lv_phy_workaround_ich8lan(sc);
   3850 
3851 	/* Clear the host wakeup bit after LCD reset */
   3852 	if (sc->sc_type >= WM_T_PCH) {
   3853 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3854 		    BM_PORT_GEN_CFG);
   3855 		reg &= ~BM_WUC_HOST_WU_BIT;
   3856 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3857 		    BM_PORT_GEN_CFG, reg);
   3858 	}
   3859 
   3860 	/* Configure the LCD with the extended configuration region in NVM */
   3861 	wm_init_lcd_from_nvm(sc);
   3862 
   3863 	/* Configure the LCD with the OEM bits in NVM */
   3864 }
   3865 
   3866 /* Only for PCH and newer */
   3867 static void
   3868 wm_write_smbus_addr(struct wm_softc *sc)
   3869 {
   3870 	uint32_t strap, freq;
   3871 	uint32_t phy_data;
   3872 
   3873 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3874 		device_xname(sc->sc_dev), __func__));
   3875 
   3876 	strap = CSR_READ(sc, WMREG_STRAP);
   3877 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3878 
   3879 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3880 
   3881 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3882 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3883 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3884 
   3885 	if (sc->sc_phytype == WMPHY_I217) {
   3886 		/* Restore SMBus frequency */
3887 		if (freq--) {
   3888 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3889 			    | HV_SMB_ADDR_FREQ_HIGH);
   3890 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3891 			    HV_SMB_ADDR_FREQ_LOW);
   3892 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3893 			    HV_SMB_ADDR_FREQ_HIGH);
   3894 		} else {
   3895 			DPRINTF(WM_DEBUG_INIT,
   3896 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3897 				device_xname(sc->sc_dev), __func__));
   3898 		}
   3899 	}
   3900 
   3901 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3902 }
   3903 
   3904 void
   3905 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3906 {
   3907 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3908 	uint16_t phy_page = 0;
   3909 
   3910 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3911 		device_xname(sc->sc_dev), __func__));
   3912 
   3913 	switch (sc->sc_type) {
   3914 	case WM_T_ICH8:
   3915 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3916 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3917 			return;
   3918 
   3919 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3920 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3921 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3922 			break;
   3923 		}
   3924 		/* FALLTHROUGH */
   3925 	case WM_T_PCH:
   3926 	case WM_T_PCH2:
   3927 	case WM_T_PCH_LPT:
   3928 	case WM_T_PCH_SPT:
   3929 	case WM_T_PCH_CNP:
   3930 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3931 		break;
   3932 	default:
   3933 		return;
   3934 	}
   3935 
   3936 	sc->phy.acquire(sc);
   3937 
   3938 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3939 	if ((reg & sw_cfg_mask) == 0)
   3940 		goto release;
   3941 
   3942 	/*
   3943 	 * Make sure HW does not configure LCD from PHY extended configuration
   3944 	 * before SW configuration
   3945 	 */
   3946 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3947 	if ((sc->sc_type < WM_T_PCH2)
   3948 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3949 		goto release;
   3950 
   3951 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3952 		device_xname(sc->sc_dev), __func__));
3953 	/* The pointer is in DWORDs; shift to get word_addr in NVM words */
   3954 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3955 
   3956 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3957 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3958 	if (cnf_size == 0)
   3959 		goto release;
   3960 
   3961 	if (((sc->sc_type == WM_T_PCH)
   3962 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3963 	    || (sc->sc_type > WM_T_PCH)) {
   3964 		/*
   3965 		 * HW configures the SMBus address and LEDs when the OEM and
   3966 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3967 		 * are cleared, SW will configure them instead.
   3968 		 */
   3969 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3970 			device_xname(sc->sc_dev), __func__));
   3971 		wm_write_smbus_addr(sc);
   3972 
   3973 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3974 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3975 	}
   3976 
   3977 	/* Configure LCD from extended configuration region. */
   3978 	for (i = 0; i < cnf_size; i++) {
   3979 		uint16_t reg_data, reg_addr;
   3980 
   3981 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3982 			goto release;
   3983 
3984 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   3985 			goto release;
   3986 
   3987 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3988 			phy_page = reg_data;
   3989 
   3990 		reg_addr &= IGPHY_MAXREGADDR;
   3991 		reg_addr |= phy_page;
   3992 
   3993 		sc->phy.release(sc); /* XXX */
   3994 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3995 		sc->phy.acquire(sc); /* XXX */
   3996 	}
   3997 
   3998 release:
   3999 	sc->phy.release(sc);
   4000 	return;
   4001 }
   4002 
   4003 
   4004 /* Init hardware bits */
   4005 void
   4006 wm_initialize_hardware_bits(struct wm_softc *sc)
   4007 {
   4008 	uint32_t tarc0, tarc1, reg;
   4009 
   4010 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4011 		device_xname(sc->sc_dev), __func__));
   4012 
   4013 	/* For 82571 variant, 80003 and ICHs */
   4014 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4015 	    || (sc->sc_type >= WM_T_80003)) {
   4016 
   4017 		/* Transmit Descriptor Control 0 */
   4018 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4019 		reg |= TXDCTL_COUNT_DESC;
   4020 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4021 
   4022 		/* Transmit Descriptor Control 1 */
   4023 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4024 		reg |= TXDCTL_COUNT_DESC;
   4025 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4026 
   4027 		/* TARC0 */
   4028 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4029 		switch (sc->sc_type) {
   4030 		case WM_T_82571:
   4031 		case WM_T_82572:
   4032 		case WM_T_82573:
   4033 		case WM_T_82574:
   4034 		case WM_T_82583:
   4035 		case WM_T_80003:
   4036 			/* Clear bits 30..27 */
   4037 			tarc0 &= ~__BITS(30, 27);
   4038 			break;
   4039 		default:
   4040 			break;
   4041 		}
   4042 
   4043 		switch (sc->sc_type) {
   4044 		case WM_T_82571:
   4045 		case WM_T_82572:
   4046 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4047 
   4048 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4049 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4050 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4051 			/* 8257[12] Errata No.7 */
4052 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4053 
   4054 			/* TARC1 bit 28 */
   4055 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4056 				tarc1 &= ~__BIT(28);
   4057 			else
   4058 				tarc1 |= __BIT(28);
   4059 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4060 
   4061 			/*
   4062 			 * 8257[12] Errata No.13
4063 			 * Disable Dynamic Clock Gating.
   4064 			 */
   4065 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4066 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4067 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4068 			break;
   4069 		case WM_T_82573:
   4070 		case WM_T_82574:
   4071 		case WM_T_82583:
   4072 			if ((sc->sc_type == WM_T_82574)
   4073 			    || (sc->sc_type == WM_T_82583))
   4074 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4075 
   4076 			/* Extended Device Control */
   4077 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4078 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4079 			reg |= __BIT(22);	/* Set bit 22 */
   4080 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4081 
   4082 			/* Device Control */
   4083 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4084 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4085 
   4086 			/* PCIe Control Register */
   4087 			/*
   4088 			 * 82573 Errata (unknown).
   4089 			 *
   4090 			 * 82574 Errata 25 and 82583 Errata 12
   4091 			 * "Dropped Rx Packets":
4092 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4093 			 */
   4094 			reg = CSR_READ(sc, WMREG_GCR);
   4095 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4096 			CSR_WRITE(sc, WMREG_GCR, reg);
   4097 
   4098 			if ((sc->sc_type == WM_T_82574)
   4099 			    || (sc->sc_type == WM_T_82583)) {
   4100 				/*
   4101 				 * Document says this bit must be set for
   4102 				 * proper operation.
   4103 				 */
   4104 				reg = CSR_READ(sc, WMREG_GCR);
   4105 				reg |= __BIT(22);
   4106 				CSR_WRITE(sc, WMREG_GCR, reg);
   4107 
   4108 				/*
4109 				 * Apply a workaround for the hardware erratum
4110 				 * documented in the errata docs. It fixes an
4111 				 * issue where some error-prone or unreliable
4112 				 * PCIe completions occur, particularly with
4113 				 * ASPM enabled. Without the fix, the issue
4114 				 * can cause Tx timeouts.
   4115 				 */
   4116 				reg = CSR_READ(sc, WMREG_GCR2);
   4117 				reg |= __BIT(0);
   4118 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4119 			}
   4120 			break;
   4121 		case WM_T_80003:
   4122 			/* TARC0 */
   4123 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4124 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4125 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4126 
   4127 			/* TARC1 bit 28 */
   4128 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4129 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4130 				tarc1 &= ~__BIT(28);
   4131 			else
   4132 				tarc1 |= __BIT(28);
   4133 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4134 			break;
   4135 		case WM_T_ICH8:
   4136 		case WM_T_ICH9:
   4137 		case WM_T_ICH10:
   4138 		case WM_T_PCH:
   4139 		case WM_T_PCH2:
   4140 		case WM_T_PCH_LPT:
   4141 		case WM_T_PCH_SPT:
   4142 		case WM_T_PCH_CNP:
   4143 			/* TARC0 */
   4144 			if (sc->sc_type == WM_T_ICH8) {
   4145 				/* Set TARC0 bits 29 and 28 */
   4146 				tarc0 |= __BITS(29, 28);
   4147 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4148 				tarc0 |= __BIT(29);
   4149 				/*
4150 				 * Drop bit 28. From Linux; see the I218/I219
4151 				 * spec update, "5. Buffer Overrun While the
4152 				 * I219 is Processing DMA Transactions".
   4154 				 */
   4155 				tarc0 &= ~__BIT(28);
   4156 			}
   4157 			/* Set TARC0 bits 23,24,26,27 */
   4158 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4159 
   4160 			/* CTRL_EXT */
   4161 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4162 			reg |= __BIT(22);	/* Set bit 22 */
   4163 			/*
   4164 			 * Enable PHY low-power state when MAC is at D3
   4165 			 * w/o WoL
   4166 			 */
   4167 			if (sc->sc_type >= WM_T_PCH)
   4168 				reg |= CTRL_EXT_PHYPDEN;
   4169 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4170 
   4171 			/* TARC1 */
   4172 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4173 			/* bit 28 */
   4174 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4175 				tarc1 &= ~__BIT(28);
   4176 			else
   4177 				tarc1 |= __BIT(28);
   4178 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4179 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4180 
   4181 			/* Device Status */
   4182 			if (sc->sc_type == WM_T_ICH8) {
   4183 				reg = CSR_READ(sc, WMREG_STATUS);
   4184 				reg &= ~__BIT(31);
   4185 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4186 
   4187 			}
   4188 
   4189 			/* IOSFPC */
   4190 			if (sc->sc_type == WM_T_PCH_SPT) {
   4191 				reg = CSR_READ(sc, WMREG_IOSFPC);
4192 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4193 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4194 			}
   4195 			/*
4196 			 * Work around a descriptor data corruption issue
4197 			 * during NFS v2 UDP traffic by simply disabling the
4198 			 * NFS filtering capability.
   4199 			 */
   4200 			reg = CSR_READ(sc, WMREG_RFCTL);
   4201 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4202 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4203 			break;
   4204 		default:
   4205 			break;
   4206 		}
   4207 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4208 
   4209 		switch (sc->sc_type) {
   4210 		/*
   4211 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4212 		 * Avoid RSS Hash Value bug.
   4213 		 */
   4214 		case WM_T_82571:
   4215 		case WM_T_82572:
   4216 		case WM_T_82573:
   4217 		case WM_T_80003:
   4218 		case WM_T_ICH8:
   4219 			reg = CSR_READ(sc, WMREG_RFCTL);
4220 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4221 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4222 			break;
   4223 		case WM_T_82574:
4224 			/* Use extended Rx descriptors. */
   4225 			reg = CSR_READ(sc, WMREG_RFCTL);
   4226 			reg |= WMREG_RFCTL_EXSTEN;
   4227 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4228 			break;
   4229 		default:
   4230 			break;
   4231 		}
   4232 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4233 		/*
   4234 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4235 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4236 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4237 		 * Correctly by the Device"
   4238 		 *
   4239 		 * I354(C2000) Errata AVR53:
   4240 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4241 		 * Hang"
   4242 		 */
   4243 		reg = CSR_READ(sc, WMREG_RFCTL);
   4244 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4245 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4246 	}
   4247 }
   4248 
   4249 static uint32_t
   4250 wm_rxpbs_adjust_82580(uint32_t val)
   4251 {
   4252 	uint32_t rv = 0;
   4253 
   4254 	if (val < __arraycount(wm_82580_rxpbs_table))
   4255 		rv = wm_82580_rxpbs_table[val];
   4256 
   4257 	return rv;
   4258 }
   4259 
   4260 /*
   4261  * wm_reset_phy:
   4262  *
   4263  *	generic PHY reset function.
   4264  *	Same as e1000_phy_hw_reset_generic()
   4265  */
   4266 static void
   4267 wm_reset_phy(struct wm_softc *sc)
   4268 {
   4269 	uint32_t reg;
   4270 
   4271 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4272 		device_xname(sc->sc_dev), __func__));
   4273 	if (wm_phy_resetisblocked(sc))
   4274 		return;
   4275 
   4276 	sc->phy.acquire(sc);
   4277 
   4278 	reg = CSR_READ(sc, WMREG_CTRL);
   4279 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4280 	CSR_WRITE_FLUSH(sc);
   4281 
   4282 	delay(sc->phy.reset_delay_us);
   4283 
   4284 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4285 	CSR_WRITE_FLUSH(sc);
   4286 
   4287 	delay(150);
   4288 
   4289 	sc->phy.release(sc);
   4290 
   4291 	wm_get_cfg_done(sc);
   4292 	wm_phy_post_reset(sc);
   4293 }
   4294 
   4295 /*
4296  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4297  * so it is enough to check sc->sc_queue[0] only.
   4298  */
   4299 static void
   4300 wm_flush_desc_rings(struct wm_softc *sc)
   4301 {
   4302 	pcireg_t preg;
   4303 	uint32_t reg;
   4304 	struct wm_txqueue *txq;
   4305 	wiseman_txdesc_t *txd;
   4306 	int nexttx;
   4307 	uint32_t rctl;
   4308 
   4309 	/* First, disable MULR fix in FEXTNVM11 */
   4310 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4311 	reg |= FEXTNVM11_DIS_MULRFIX;
   4312 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4313 
   4314 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4315 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4316 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4317 		return;
   4318 
   4319 	/* TX */
   4320 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4321 	    device_xname(sc->sc_dev), preg, reg);
   4322 	reg = CSR_READ(sc, WMREG_TCTL);
   4323 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4324 
   4325 	txq = &sc->sc_queue[0].wmq_txq;
   4326 	nexttx = txq->txq_next;
   4327 	txd = &txq->txq_descs[nexttx];
   4328 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4329 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4330 	txd->wtx_fields.wtxu_status = 0;
   4331 	txd->wtx_fields.wtxu_options = 0;
   4332 	txd->wtx_fields.wtxu_vlan = 0;
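	/*
	 * Note: this builds a single dummy 512-byte descriptor and bumps
	 * TDT below, which should make the hardware flush any descriptors
	 * still pending in its internal cache before the ring is torn down.
	 */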
   4333 
   4334 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4335 	    BUS_SPACE_BARRIER_WRITE);
   4336 
   4337 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4338 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4339 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4340 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4341 	delay(250);
   4342 
   4343 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4344 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4345 		return;
   4346 
   4347 	/* RX */
   4348 	printf("%s: Need RX flush (reg = %08x)\n",
   4349 	    device_xname(sc->sc_dev), preg);
   4350 	rctl = CSR_READ(sc, WMREG_RCTL);
   4351 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4352 	CSR_WRITE_FLUSH(sc);
   4353 	delay(150);
   4354 
   4355 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4356 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4357 	reg &= 0xffffc000;
   4358 	/*
   4359 	 * update thresholds: prefetch threshold to 31, host threshold
   4360 	 * to 1 and make sure the granularity is "descriptors" and not
   4361 	 * "cache lines"
   4362 	 */
   4363 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4364 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4365 
   4366 	/*
   4367 	 * momentarily enable the RX ring for the changes to take
   4368 	 * effect
   4369 	 */
   4370 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4371 	CSR_WRITE_FLUSH(sc);
   4372 	delay(150);
   4373 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4374 }
   4375 
   4376 /*
   4377  * wm_reset:
   4378  *
   4379  *	Reset the i82542 chip.
   4380  */
   4381 static void
   4382 wm_reset(struct wm_softc *sc)
   4383 {
   4384 	int phy_reset = 0;
   4385 	int i, error = 0;
   4386 	uint32_t reg;
   4387 	uint16_t kmreg;
   4388 	int rv;
   4389 
   4390 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4391 		device_xname(sc->sc_dev), __func__));
   4392 	KASSERT(sc->sc_type != 0);
   4393 
   4394 	/*
   4395 	 * Allocate on-chip memory according to the MTU size.
   4396 	 * The Packet Buffer Allocation register must be written
   4397 	 * before the chip is reset.
   4398 	 */
   4399 	switch (sc->sc_type) {
   4400 	case WM_T_82547:
   4401 	case WM_T_82547_2:
   4402 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4403 		    PBA_22K : PBA_30K;
   4404 		for (i = 0; i < sc->sc_nqueues; i++) {
   4405 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4406 			txq->txq_fifo_head = 0;
   4407 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4408 			txq->txq_fifo_size =
   4409 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4410 			txq->txq_fifo_stall = 0;
   4411 		}
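		/*
		 * Illustrative example (assuming the PBA_* constants encode
		 * KB, as their names suggest): an MTU above 8192 gives
		 * sc_pba = PBA_22K, leaving 40 - 22 = 18KB of on-chip
		 * memory for the Tx FIFO computed above.
		 */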
   4412 		break;
   4413 	case WM_T_82571:
   4414 	case WM_T_82572:
4415 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4416 	case WM_T_80003:
   4417 		sc->sc_pba = PBA_32K;
   4418 		break;
   4419 	case WM_T_82573:
   4420 		sc->sc_pba = PBA_12K;
   4421 		break;
   4422 	case WM_T_82574:
   4423 	case WM_T_82583:
   4424 		sc->sc_pba = PBA_20K;
   4425 		break;
   4426 	case WM_T_82576:
   4427 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4428 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4429 		break;
   4430 	case WM_T_82580:
   4431 	case WM_T_I350:
   4432 	case WM_T_I354:
   4433 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4434 		break;
   4435 	case WM_T_I210:
   4436 	case WM_T_I211:
   4437 		sc->sc_pba = PBA_34K;
   4438 		break;
   4439 	case WM_T_ICH8:
   4440 		/* Workaround for a bit corruption issue in FIFO memory */
   4441 		sc->sc_pba = PBA_8K;
   4442 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4443 		break;
   4444 	case WM_T_ICH9:
   4445 	case WM_T_ICH10:
   4446 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4447 		    PBA_14K : PBA_10K;
   4448 		break;
   4449 	case WM_T_PCH:
   4450 	case WM_T_PCH2:	/* XXX 14K? */
   4451 	case WM_T_PCH_LPT:
   4452 	case WM_T_PCH_SPT:
   4453 	case WM_T_PCH_CNP:
   4454 		sc->sc_pba = PBA_26K;
   4455 		break;
   4456 	default:
   4457 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4458 		    PBA_40K : PBA_48K;
   4459 		break;
   4460 	}
   4461 	/*
   4462 	 * Only old or non-multiqueue devices have the PBA register
   4463 	 * XXX Need special handling for 82575.
   4464 	 */
   4465 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4466 	    || (sc->sc_type == WM_T_82575))
   4467 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4468 
   4469 	/* Prevent the PCI-E bus from sticking */
   4470 	if (sc->sc_flags & WM_F_PCIE) {
   4471 		int timeout = 800;
   4472 
   4473 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4474 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4475 
   4476 		while (timeout--) {
   4477 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4478 			    == 0)
   4479 				break;
   4480 			delay(100);
   4481 		}
   4482 		if (timeout == 0)
   4483 			device_printf(sc->sc_dev,
   4484 			    "failed to disable busmastering\n");
   4485 	}
   4486 
   4487 	/* Set the completion timeout for interface */
   4488 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4489 	    || (sc->sc_type == WM_T_82580)
   4490 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4491 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4492 		wm_set_pcie_completion_timeout(sc);
   4493 
   4494 	/* Clear interrupt */
   4495 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4496 	if (wm_is_using_msix(sc)) {
   4497 		if (sc->sc_type != WM_T_82574) {
   4498 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4499 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4500 		} else {
   4501 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4502 		}
   4503 	}
   4504 
   4505 	/* Stop the transmit and receive processes. */
   4506 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4507 	sc->sc_rctl &= ~RCTL_EN;
   4508 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4509 	CSR_WRITE_FLUSH(sc);
   4510 
   4511 	/* XXX set_tbi_sbp_82543() */
   4512 
   4513 	delay(10*1000);
   4514 
   4515 	/* Must acquire the MDIO ownership before MAC reset */
   4516 	switch (sc->sc_type) {
   4517 	case WM_T_82573:
   4518 	case WM_T_82574:
   4519 	case WM_T_82583:
   4520 		error = wm_get_hw_semaphore_82573(sc);
   4521 		break;
   4522 	default:
   4523 		break;
   4524 	}
   4525 
   4526 	/*
   4527 	 * 82541 Errata 29? & 82547 Errata 28?
   4528 	 * See also the description about PHY_RST bit in CTRL register
   4529 	 * in 8254x_GBe_SDM.pdf.
   4530 	 */
   4531 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4532 		CSR_WRITE(sc, WMREG_CTRL,
   4533 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4534 		CSR_WRITE_FLUSH(sc);
   4535 		delay(5000);
   4536 	}
   4537 
   4538 	switch (sc->sc_type) {
   4539 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4540 	case WM_T_82541:
   4541 	case WM_T_82541_2:
   4542 	case WM_T_82547:
   4543 	case WM_T_82547_2:
   4544 		/*
   4545 		 * On some chipsets, a reset through a memory-mapped write
   4546 		 * cycle can cause the chip to reset before completing the
    4547 		 * write cycle.  This causes major headaches that can be
   4548 		 * avoided by issuing the reset via indirect register writes
   4549 		 * through I/O space.
   4550 		 *
   4551 		 * So, if we successfully mapped the I/O BAR at attach time,
   4552 		 * use that.  Otherwise, try our luck with a memory-mapped
   4553 		 * reset.
   4554 		 */
   4555 		if (sc->sc_flags & WM_F_IOH_VALID)
   4556 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4557 		else
   4558 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4559 		break;
   4560 	case WM_T_82545_3:
   4561 	case WM_T_82546_3:
   4562 		/* Use the shadow control register on these chips. */
   4563 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4564 		break;
   4565 	case WM_T_80003:
   4566 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4567 		sc->phy.acquire(sc);
   4568 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4569 		sc->phy.release(sc);
   4570 		break;
   4571 	case WM_T_ICH8:
   4572 	case WM_T_ICH9:
   4573 	case WM_T_ICH10:
   4574 	case WM_T_PCH:
   4575 	case WM_T_PCH2:
   4576 	case WM_T_PCH_LPT:
   4577 	case WM_T_PCH_SPT:
   4578 	case WM_T_PCH_CNP:
   4579 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4580 		if (wm_phy_resetisblocked(sc) == false) {
   4581 			/*
   4582 			 * Gate automatic PHY configuration by hardware on
   4583 			 * non-managed 82579
   4584 			 */
   4585 			if ((sc->sc_type == WM_T_PCH2)
   4586 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4587 				== 0))
   4588 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4589 
   4590 			reg |= CTRL_PHY_RESET;
   4591 			phy_reset = 1;
   4592 		} else
   4593 			printf("XXX reset is blocked!!!\n");
   4594 		sc->phy.acquire(sc);
   4595 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4596 		/* Don't insert a completion barrier during reset */
   4597 		delay(20*1000);
   4598 		mutex_exit(sc->sc_ich_phymtx);
   4599 		break;
   4600 	case WM_T_82580:
   4601 	case WM_T_I350:
   4602 	case WM_T_I354:
   4603 	case WM_T_I210:
   4604 	case WM_T_I211:
   4605 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4606 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4607 			CSR_WRITE_FLUSH(sc);
   4608 		delay(5000);
   4609 		break;
   4610 	case WM_T_82542_2_0:
   4611 	case WM_T_82542_2_1:
   4612 	case WM_T_82543:
   4613 	case WM_T_82540:
   4614 	case WM_T_82545:
   4615 	case WM_T_82546:
   4616 	case WM_T_82571:
   4617 	case WM_T_82572:
   4618 	case WM_T_82573:
   4619 	case WM_T_82574:
   4620 	case WM_T_82575:
   4621 	case WM_T_82576:
   4622 	case WM_T_82583:
   4623 	default:
   4624 		/* Everything else can safely use the documented method. */
   4625 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4626 		break;
   4627 	}
   4628 
   4629 	/* Must release the MDIO ownership after MAC reset */
   4630 	switch (sc->sc_type) {
   4631 	case WM_T_82573:
   4632 	case WM_T_82574:
   4633 	case WM_T_82583:
   4634 		if (error == 0)
   4635 			wm_put_hw_semaphore_82573(sc);
   4636 		break;
   4637 	default:
   4638 		break;
   4639 	}
   4640 
   4641 	if (phy_reset != 0)
   4642 		wm_get_cfg_done(sc);
   4643 
   4644 	/* reload EEPROM */
   4645 	switch (sc->sc_type) {
   4646 	case WM_T_82542_2_0:
   4647 	case WM_T_82542_2_1:
   4648 	case WM_T_82543:
   4649 	case WM_T_82544:
   4650 		delay(10);
   4651 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4652 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4653 		CSR_WRITE_FLUSH(sc);
   4654 		delay(2000);
   4655 		break;
   4656 	case WM_T_82540:
   4657 	case WM_T_82545:
   4658 	case WM_T_82545_3:
   4659 	case WM_T_82546:
   4660 	case WM_T_82546_3:
   4661 		delay(5*1000);
   4662 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4663 		break;
   4664 	case WM_T_82541:
   4665 	case WM_T_82541_2:
   4666 	case WM_T_82547:
   4667 	case WM_T_82547_2:
   4668 		delay(20000);
   4669 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4670 		break;
   4671 	case WM_T_82571:
   4672 	case WM_T_82572:
   4673 	case WM_T_82573:
   4674 	case WM_T_82574:
   4675 	case WM_T_82583:
   4676 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4677 			delay(10);
   4678 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4679 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4680 			CSR_WRITE_FLUSH(sc);
   4681 		}
   4682 		/* check EECD_EE_AUTORD */
   4683 		wm_get_auto_rd_done(sc);
   4684 		/*
    4685 		 * PHY configuration from the NVM starts just after
    4686 		 * EECD_AUTO_RD is set.
   4687 		 */
   4688 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4689 		    || (sc->sc_type == WM_T_82583))
   4690 			delay(25*1000);
   4691 		break;
   4692 	case WM_T_82575:
   4693 	case WM_T_82576:
   4694 	case WM_T_82580:
   4695 	case WM_T_I350:
   4696 	case WM_T_I354:
   4697 	case WM_T_I210:
   4698 	case WM_T_I211:
   4699 	case WM_T_80003:
   4700 		/* check EECD_EE_AUTORD */
   4701 		wm_get_auto_rd_done(sc);
   4702 		break;
   4703 	case WM_T_ICH8:
   4704 	case WM_T_ICH9:
   4705 	case WM_T_ICH10:
   4706 	case WM_T_PCH:
   4707 	case WM_T_PCH2:
   4708 	case WM_T_PCH_LPT:
   4709 	case WM_T_PCH_SPT:
   4710 	case WM_T_PCH_CNP:
   4711 		break;
   4712 	default:
   4713 		panic("%s: unknown type\n", __func__);
   4714 	}
   4715 
   4716 	/* Check whether EEPROM is present or not */
   4717 	switch (sc->sc_type) {
   4718 	case WM_T_82575:
   4719 	case WM_T_82576:
   4720 	case WM_T_82580:
   4721 	case WM_T_I350:
   4722 	case WM_T_I354:
   4723 	case WM_T_ICH8:
   4724 	case WM_T_ICH9:
   4725 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4726 			/* Not found */
   4727 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4728 			if (sc->sc_type == WM_T_82575)
   4729 				wm_reset_init_script_82575(sc);
   4730 		}
   4731 		break;
   4732 	default:
   4733 		break;
   4734 	}
   4735 
   4736 	if (phy_reset != 0)
   4737 		wm_phy_post_reset(sc);
   4738 
   4739 	if ((sc->sc_type == WM_T_82580)
   4740 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4741 		/* clear global device reset status bit */
   4742 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4743 	}
   4744 
   4745 	/* Clear any pending interrupt events. */
   4746 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4747 	reg = CSR_READ(sc, WMREG_ICR);
   4748 	if (wm_is_using_msix(sc)) {
   4749 		if (sc->sc_type != WM_T_82574) {
   4750 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4751 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4752 		} else
   4753 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4754 	}
   4755 
   4756 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4757 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4758 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4759 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4760 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4761 		reg |= KABGTXD_BGSQLBIAS;
   4762 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4763 	}
   4764 
   4765 	/* reload sc_ctrl */
   4766 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4767 
   4768 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4769 		wm_set_eee_i350(sc);
   4770 
   4771 	/*
   4772 	 * For PCH, this write will make sure that any noise will be detected
   4773 	 * as a CRC error and be dropped rather than show up as a bad packet
    4774 	 * to the DMA engine.
   4775 	 */
   4776 	if (sc->sc_type == WM_T_PCH)
   4777 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4778 
   4779 	if (sc->sc_type >= WM_T_82544)
   4780 		CSR_WRITE(sc, WMREG_WUC, 0);
   4781 
   4782 	wm_reset_mdicnfg_82580(sc);
   4783 
   4784 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4785 		wm_pll_workaround_i210(sc);
   4786 
   4787 	if (sc->sc_type == WM_T_80003) {
   4788 		/* default to TRUE to enable the MDIC W/A */
   4789 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4790 
   4791 		rv = wm_kmrn_readreg(sc,
   4792 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4793 		if (rv == 0) {
   4794 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4795 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4796 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4797 			else
   4798 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4799 		}
   4800 	}
   4801 }
   4802 
   4803 /*
   4804  * wm_add_rxbuf:
   4805  *
    4806  *	Add a receive buffer to the indicated descriptor.
   4807  */
   4808 static int
   4809 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4810 {
   4811 	struct wm_softc *sc = rxq->rxq_sc;
   4812 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4813 	struct mbuf *m;
   4814 	int error;
   4815 
   4816 	KASSERT(mutex_owned(rxq->rxq_lock));
   4817 
   4818 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4819 	if (m == NULL)
   4820 		return ENOBUFS;
   4821 
   4822 	MCLGET(m, M_DONTWAIT);
   4823 	if ((m->m_flags & M_EXT) == 0) {
   4824 		m_freem(m);
   4825 		return ENOBUFS;
   4826 	}
   4827 
   4828 	if (rxs->rxs_mbuf != NULL)
   4829 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4830 
   4831 	rxs->rxs_mbuf = m;
   4832 
   4833 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4834 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4835 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4836 	if (error) {
   4837 		/* XXX XXX XXX */
   4838 		aprint_error_dev(sc->sc_dev,
   4839 		    "unable to load rx DMA map %d, error = %d\n",
   4840 		    idx, error);
   4841 		panic("wm_add_rxbuf");
   4842 	}
   4843 
   4844 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4845 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4846 
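         	/*
         	 * For NEWQUEUE devices, write the RX descriptor only once the
         	 * receiver is enabled; the descriptors must be initialized
         	 * after RCTL.EN is set (see the note above wm_set_filter() in
         	 * wm_init_locked()).
         	 */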
   4847 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4848 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4849 			wm_init_rxdesc(rxq, idx);
   4850 	} else
   4851 		wm_init_rxdesc(rxq, idx);
   4852 
   4853 	return 0;
   4854 }
   4855 
   4856 /*
   4857  * wm_rxdrain:
   4858  *
   4859  *	Drain the receive queue.
   4860  */
   4861 static void
   4862 wm_rxdrain(struct wm_rxqueue *rxq)
   4863 {
   4864 	struct wm_softc *sc = rxq->rxq_sc;
   4865 	struct wm_rxsoft *rxs;
   4866 	int i;
   4867 
   4868 	KASSERT(mutex_owned(rxq->rxq_lock));
   4869 
   4870 	for (i = 0; i < WM_NRXDESC; i++) {
   4871 		rxs = &rxq->rxq_soft[i];
   4872 		if (rxs->rxs_mbuf != NULL) {
   4873 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4874 			m_freem(rxs->rxs_mbuf);
   4875 			rxs->rxs_mbuf = NULL;
   4876 		}
   4877 	}
   4878 }
   4879 
   4880 /*
   4881  * Setup registers for RSS.
   4882  *
    4883  * XXX VMDq is not yet supported
   4884  */
   4885 static void
   4886 wm_init_rss(struct wm_softc *sc)
   4887 {
   4888 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4889 	int i;
   4890 
   4891 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4892 
   4893 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4894 		int qid, reta_ent;
   4895 
   4896 		qid  = i % sc->sc_nqueues;
    4897 		switch (sc->sc_type) {
   4898 		case WM_T_82574:
   4899 			reta_ent = __SHIFTIN(qid,
   4900 			    RETA_ENT_QINDEX_MASK_82574);
   4901 			break;
   4902 		case WM_T_82575:
   4903 			reta_ent = __SHIFTIN(qid,
   4904 			    RETA_ENT_QINDEX1_MASK_82575);
   4905 			break;
   4906 		default:
   4907 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4908 			break;
   4909 		}
   4910 
   4911 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4912 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4913 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4914 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4915 	}
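         	/*
         	 * Worked example: since qid = i % sc->sc_nqueues, a device
         	 * using 4 queues fills the redirection table with the
         	 * repeating pattern 0, 1, 2, 3, 0, 1, ..., spreading RSS
         	 * hash buckets evenly across the queues.
         	 */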
   4916 
   4917 	rss_getkey((uint8_t *)rss_key);
   4918 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4919 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4920 
   4921 	if (sc->sc_type == WM_T_82574)
   4922 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4923 	else
   4924 		mrqc = MRQC_ENABLE_RSS_MQ;
   4925 
   4926 	/*
    4927 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   4928 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4929 	 */
   4930 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4931 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4932 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4933 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4934 
   4935 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4936 }
   4937 
   4938 /*
    4939  * Adjust the TX and RX queue numbers which the system actually uses.
    4940  *
    4941  * The numbers are affected by the following parameters:
    4942  *     - The number of hardware queues
   4943  *     - The number of MSI-X vectors (= "nvectors" argument)
   4944  *     - ncpu
   4945  */
   4946 static void
   4947 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4948 {
   4949 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4950 
   4951 	if (nvectors < 2) {
   4952 		sc->sc_nqueues = 1;
   4953 		return;
   4954 	}
   4955 
    4956 	switch (sc->sc_type) {
   4957 	case WM_T_82572:
   4958 		hw_ntxqueues = 2;
   4959 		hw_nrxqueues = 2;
   4960 		break;
   4961 	case WM_T_82574:
   4962 		hw_ntxqueues = 2;
   4963 		hw_nrxqueues = 2;
   4964 		break;
   4965 	case WM_T_82575:
   4966 		hw_ntxqueues = 4;
   4967 		hw_nrxqueues = 4;
   4968 		break;
   4969 	case WM_T_82576:
   4970 		hw_ntxqueues = 16;
   4971 		hw_nrxqueues = 16;
   4972 		break;
   4973 	case WM_T_82580:
   4974 	case WM_T_I350:
   4975 	case WM_T_I354:
   4976 		hw_ntxqueues = 8;
   4977 		hw_nrxqueues = 8;
   4978 		break;
   4979 	case WM_T_I210:
   4980 		hw_ntxqueues = 4;
   4981 		hw_nrxqueues = 4;
   4982 		break;
   4983 	case WM_T_I211:
   4984 		hw_ntxqueues = 2;
   4985 		hw_nrxqueues = 2;
   4986 		break;
   4987 		/*
    4988 		 * As the ethernet controllers below do not support MSI-X,
    4989 		 * this driver does not use multiqueue on them.
   4990 		 *     - WM_T_80003
   4991 		 *     - WM_T_ICH8
   4992 		 *     - WM_T_ICH9
   4993 		 *     - WM_T_ICH10
   4994 		 *     - WM_T_PCH
   4995 		 *     - WM_T_PCH2
   4996 		 *     - WM_T_PCH_LPT
   4997 		 */
   4998 	default:
   4999 		hw_ntxqueues = 1;
   5000 		hw_nrxqueues = 1;
   5001 		break;
   5002 	}
   5003 
   5004 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   5005 
   5006 	/*
    5007 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    5008 	 * limit the number of queues actually used.
   5009 	 */
   5010 	if (nvectors < hw_nqueues + 1) {
   5011 		sc->sc_nqueues = nvectors - 1;
   5012 	} else {
   5013 		sc->sc_nqueues = hw_nqueues;
   5014 	}
   5015 
   5016 	/*
    5017 	 * Since more queues than CPUs cannot improve scaling, we limit
    5018 	 * the number of queues actually used.
   5019 	 */
   5020 	if (ncpu < sc->sc_nqueues)
   5021 		sc->sc_nqueues = ncpu;
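         	/*
         	 * Example of the net effect (hypothetical numbers): an 82576
         	 * exposes 16 hardware queues, so with nvectors == 5 and
         	 * ncpu == 8 we end up with sc_nqueues == 4 (nvectors - 1).
         	 */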
   5022 }
   5023 
   5024 static inline bool
   5025 wm_is_using_msix(struct wm_softc *sc)
   5026 {
   5027 
   5028 	return (sc->sc_nintrs > 1);
   5029 }
   5030 
   5031 static inline bool
   5032 wm_is_using_multiqueue(struct wm_softc *sc)
   5033 {
   5034 
   5035 	return (sc->sc_nqueues > 1);
   5036 }
   5037 
   5038 static int
   5039 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5040 {
   5041 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5042 	wmq->wmq_id = qidx;
   5043 	wmq->wmq_intr_idx = intr_idx;
   5044 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5045 #ifdef WM_MPSAFE
   5046 	    | SOFTINT_MPSAFE
   5047 #endif
   5048 	    , wm_handle_queue, wmq);
   5049 	if (wmq->wmq_si != NULL)
   5050 		return 0;
   5051 
   5052 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5053 	    wmq->wmq_id);
   5054 
   5055 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5056 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5057 	return ENOMEM;
   5058 }
   5059 
   5060 /*
    5061  * Both single-interrupt MSI and INTx can use this function.
   5062  */
   5063 static int
   5064 wm_setup_legacy(struct wm_softc *sc)
   5065 {
   5066 	pci_chipset_tag_t pc = sc->sc_pc;
   5067 	const char *intrstr = NULL;
   5068 	char intrbuf[PCI_INTRSTR_LEN];
   5069 	int error;
   5070 
   5071 	error = wm_alloc_txrx_queues(sc);
   5072 	if (error) {
   5073 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5074 		    error);
   5075 		return ENOMEM;
   5076 	}
   5077 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5078 	    sizeof(intrbuf));
   5079 #ifdef WM_MPSAFE
   5080 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5081 #endif
   5082 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5083 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5084 	if (sc->sc_ihs[0] == NULL) {
   5085 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5086 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5087 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5088 		return ENOMEM;
   5089 	}
   5090 
   5091 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5092 	sc->sc_nintrs = 1;
   5093 
   5094 	return wm_softint_establish(sc, 0, 0);
   5095 }
   5096 
   5097 static int
   5098 wm_setup_msix(struct wm_softc *sc)
   5099 {
   5100 	void *vih;
   5101 	kcpuset_t *affinity;
   5102 	int qidx, error, intr_idx, txrx_established;
   5103 	pci_chipset_tag_t pc = sc->sc_pc;
   5104 	const char *intrstr = NULL;
   5105 	char intrbuf[PCI_INTRSTR_LEN];
   5106 	char intr_xname[INTRDEVNAMEBUF];
   5107 
   5108 	if (sc->sc_nqueues < ncpu) {
   5109 		/*
    5110 		 * To avoid contention with other devices' interrupts, the
    5111 		 * Tx/Rx interrupt affinity starts from CPU#1.
   5112 		 */
   5113 		sc->sc_affinity_offset = 1;
   5114 	} else {
   5115 		/*
    5116 		 * In this case, this device uses all CPUs, so for readability
    5117 		 * we unify the affinitized cpu_index with the MSI-X vector number.
   5118 		 */
   5119 		sc->sc_affinity_offset = 0;
   5120 	}
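         	/*
         	 * Example (hypothetical numbers): with 4 queues on an 8-CPU
         	 * machine, sc_affinity_offset is 1, so the TXRX0..TXRX3
         	 * handlers below are bound round-robin to CPU#1..CPU#4 via
         	 * affinity_to = (sc_affinity_offset + intr_idx) % ncpu.
         	 */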
   5121 
   5122 	error = wm_alloc_txrx_queues(sc);
   5123 	if (error) {
   5124 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5125 		    error);
   5126 		return ENOMEM;
   5127 	}
   5128 
   5129 	kcpuset_create(&affinity, false);
   5130 	intr_idx = 0;
   5131 
   5132 	/*
   5133 	 * TX and RX
   5134 	 */
   5135 	txrx_established = 0;
   5136 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5137 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5138 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5139 
   5140 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5141 		    sizeof(intrbuf));
   5142 #ifdef WM_MPSAFE
   5143 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5144 		    PCI_INTR_MPSAFE, true);
   5145 #endif
   5146 		memset(intr_xname, 0, sizeof(intr_xname));
   5147 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5148 		    device_xname(sc->sc_dev), qidx);
   5149 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5150 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5151 		if (vih == NULL) {
   5152 			aprint_error_dev(sc->sc_dev,
   5153 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5154 			    intrstr ? " at " : "",
   5155 			    intrstr ? intrstr : "");
   5156 
   5157 			goto fail;
   5158 		}
   5159 		kcpuset_zero(affinity);
   5160 		/* Round-robin affinity */
   5161 		kcpuset_set(affinity, affinity_to);
   5162 		error = interrupt_distribute(vih, affinity, NULL);
   5163 		if (error == 0) {
   5164 			aprint_normal_dev(sc->sc_dev,
   5165 			    "for TX and RX interrupting at %s affinity to %u\n",
   5166 			    intrstr, affinity_to);
   5167 		} else {
   5168 			aprint_normal_dev(sc->sc_dev,
   5169 			    "for TX and RX interrupting at %s\n", intrstr);
   5170 		}
   5171 		sc->sc_ihs[intr_idx] = vih;
   5172 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5173 			goto fail;
   5174 		txrx_established++;
   5175 		intr_idx++;
   5176 	}
   5177 
   5178 	/*
   5179 	 * LINK
   5180 	 */
   5181 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5182 	    sizeof(intrbuf));
   5183 #ifdef WM_MPSAFE
   5184 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5185 #endif
   5186 	memset(intr_xname, 0, sizeof(intr_xname));
   5187 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5188 	    device_xname(sc->sc_dev));
   5189 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5190 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5191 	if (vih == NULL) {
   5192 		aprint_error_dev(sc->sc_dev,
   5193 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5194 		    intrstr ? " at " : "",
   5195 		    intrstr ? intrstr : "");
   5196 
   5197 		goto fail;
   5198 	}
    5199 	/* Keep the default affinity for the LINK interrupt */
   5200 	aprint_normal_dev(sc->sc_dev,
   5201 	    "for LINK interrupting at %s\n", intrstr);
   5202 	sc->sc_ihs[intr_idx] = vih;
   5203 	sc->sc_link_intr_idx = intr_idx;
   5204 
   5205 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5206 	kcpuset_destroy(affinity);
   5207 	return 0;
   5208 
   5209  fail:
   5210 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5211 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5212 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5213 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5214 	}
   5215 
   5216 	kcpuset_destroy(affinity);
   5217 	return ENOMEM;
   5218 }
   5219 
   5220 static void
   5221 wm_unset_stopping_flags(struct wm_softc *sc)
   5222 {
   5223 	int i;
   5224 
   5225 	KASSERT(WM_CORE_LOCKED(sc));
   5226 
   5227 	/*
    5228 	 * Must unset the stopping flags in ascending order.
   5229 	 */
    5230 	for (i = 0; i < sc->sc_nqueues; i++) {
   5231 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5232 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5233 
   5234 		mutex_enter(txq->txq_lock);
   5235 		txq->txq_stopping = false;
   5236 		mutex_exit(txq->txq_lock);
   5237 
   5238 		mutex_enter(rxq->rxq_lock);
   5239 		rxq->rxq_stopping = false;
   5240 		mutex_exit(rxq->rxq_lock);
   5241 	}
   5242 
   5243 	sc->sc_core_stopping = false;
   5244 }
   5245 
   5246 static void
   5247 wm_set_stopping_flags(struct wm_softc *sc)
   5248 {
   5249 	int i;
   5250 
   5251 	KASSERT(WM_CORE_LOCKED(sc));
   5252 
   5253 	sc->sc_core_stopping = true;
   5254 
   5255 	/*
    5256 	 * Must set the stopping flags in ascending order.
   5257 	 */
    5258 	for (i = 0; i < sc->sc_nqueues; i++) {
   5259 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5260 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5261 
   5262 		mutex_enter(rxq->rxq_lock);
   5263 		rxq->rxq_stopping = true;
   5264 		mutex_exit(rxq->rxq_lock);
   5265 
   5266 		mutex_enter(txq->txq_lock);
   5267 		txq->txq_stopping = true;
   5268 		mutex_exit(txq->txq_lock);
   5269 	}
   5270 }
   5271 
   5272 /*
    5273  * Write the interrupt interval value to the ITR or EITR register.
   5274  */
   5275 static void
   5276 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5277 {
   5278 
   5279 	if (!wmq->wmq_set_itr)
   5280 		return;
   5281 
   5282 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5283 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5284 
   5285 		/*
    5286 		 * The 82575 doesn't have the CNT_INGR field,
    5287 		 * so overwrite the counter field in software.
   5288 		 */
   5289 		if (sc->sc_type == WM_T_82575)
   5290 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5291 		else
   5292 			eitr |= EITR_CNT_INGR;
   5293 
   5294 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5295 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5296 		/*
    5297 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5298 		 * the multiqueue function with MSI-X.
   5299 		 */
   5300 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5301 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5302 	} else {
   5303 		KASSERT(wmq->wmq_id == 0);
   5304 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5305 	}
   5306 
   5307 	wmq->wmq_set_itr = false;
   5308 }
   5309 
   5310 /*
   5311  * TODO
    5312  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5313  * however, it does not fit wm(4), so AIM is disabled until we find an
    5314  * appropriate ITR calculation.
   5315  */
   5316 /*
    5317  * Calculate the interrupt interval value to be written to the register
    5318  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5319  */
   5320 static void
   5321 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5322 {
   5323 #ifdef NOTYET
   5324 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5325 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5326 	uint32_t avg_size = 0;
   5327 	uint32_t new_itr;
   5328 
   5329 	if (rxq->rxq_packets)
   5330 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5331 	if (txq->txq_packets)
   5332 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5333 
   5334 	if (avg_size == 0) {
   5335 		new_itr = 450; /* restore default value */
   5336 		goto out;
   5337 	}
   5338 
   5339 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5340 	avg_size += 24;
   5341 
   5342 	/* Don't starve jumbo frames */
   5343 	avg_size = min(avg_size, 3000);
   5344 
   5345 	/* Give a little boost to mid-size frames */
   5346 	if ((avg_size > 300) && (avg_size < 1200))
   5347 		new_itr = avg_size / 3;
   5348 	else
   5349 		new_itr = avg_size / 2;
   5350 
   5351 out:
   5352 	/*
    5353 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5354 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5355 	 */
   5356 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5357 		new_itr *= 4;
   5358 
   5359 	if (new_itr != wmq->wmq_itr) {
   5360 		wmq->wmq_itr = new_itr;
   5361 		wmq->wmq_set_itr = true;
   5362 	} else
   5363 		wmq->wmq_set_itr = false;
   5364 
   5365 	rxq->rxq_packets = 0;
   5366 	rxq->rxq_bytes = 0;
   5367 	txq->txq_packets = 0;
   5368 	txq->txq_bytes = 0;
   5369 #endif
   5370 }
   5371 
   5372 /*
   5373  * wm_init:		[ifnet interface function]
   5374  *
   5375  *	Initialize the interface.
   5376  */
   5377 static int
   5378 wm_init(struct ifnet *ifp)
   5379 {
   5380 	struct wm_softc *sc = ifp->if_softc;
   5381 	int ret;
   5382 
   5383 	WM_CORE_LOCK(sc);
   5384 	ret = wm_init_locked(ifp);
   5385 	WM_CORE_UNLOCK(sc);
   5386 
   5387 	return ret;
   5388 }
   5389 
   5390 static int
   5391 wm_init_locked(struct ifnet *ifp)
   5392 {
   5393 	struct wm_softc *sc = ifp->if_softc;
   5394 	int i, j, trynum, error = 0;
   5395 	uint32_t reg;
   5396 
   5397 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5398 		device_xname(sc->sc_dev), __func__));
   5399 	KASSERT(WM_CORE_LOCKED(sc));
   5400 
   5401 	/*
    5402 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5403 	 * There is a small but measurable benefit to avoiding the adjustment
   5404 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5405 	 * on such platforms.  One possibility is that the DMA itself is
   5406 	 * slightly more efficient if the front of the entire packet (instead
   5407 	 * of the front of the headers) is aligned.
   5408 	 *
   5409 	 * Note we must always set align_tweak to 0 if we are using
   5410 	 * jumbo frames.
   5411 	 */
   5412 #ifdef __NO_STRICT_ALIGNMENT
   5413 	sc->sc_align_tweak = 0;
   5414 #else
   5415 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5416 		sc->sc_align_tweak = 0;
   5417 	else
   5418 		sc->sc_align_tweak = 2;
   5419 #endif /* __NO_STRICT_ALIGNMENT */
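         	/*
         	 * The usual motivation for the 2-byte tweak: the 14-byte
         	 * Ethernet header plus a 2-byte offset leaves the IP header
         	 * 4-byte aligned in the receive buffer, which matters on
         	 * strict-alignment platforms.
         	 */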
   5420 
   5421 	/* Cancel any pending I/O. */
   5422 	wm_stop_locked(ifp, 0);
   5423 
   5424 	/* update statistics before reset */
   5425 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5426 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5427 
   5428 	/* PCH_SPT hardware workaround */
   5429 	if (sc->sc_type == WM_T_PCH_SPT)
   5430 		wm_flush_desc_rings(sc);
   5431 
   5432 	/* Reset the chip to a known state. */
   5433 	wm_reset(sc);
   5434 
   5435 	/*
   5436 	 * AMT based hardware can now take control from firmware
   5437 	 * Do this after reset.
   5438 	 */
   5439 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5440 		wm_get_hw_control(sc);
   5441 
   5442 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5443 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5444 		wm_legacy_irq_quirk_spt(sc);
   5445 
   5446 	/* Init hardware bits */
   5447 	wm_initialize_hardware_bits(sc);
   5448 
   5449 	/* Reset the PHY. */
   5450 	if (sc->sc_flags & WM_F_HAS_MII)
   5451 		wm_gmii_reset(sc);
   5452 
   5453 	/* Calculate (E)ITR value */
   5454 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5455 		/*
    5456 		 * For NEWQUEUE's EITR (except for the 82575).
    5457 		 * The 82575's EITR should be set to the same throttling value
    5458 		 * as other old controllers' ITR because the interrupt/sec
    5459 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5460 		 *
    5461 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5462 		 *
    5463 		 * For N interrupts/sec, set this value to:
    5464 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5465 		 */
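         		/* 450 corresponds to roughly 1,000,000 / 450 ~= 2222 interrupts/sec. */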
   5466 		sc->sc_itr_init = 450;
   5467 	} else if (sc->sc_type >= WM_T_82543) {
   5468 		/*
   5469 		 * Set up the interrupt throttling register (units of 256ns)
   5470 		 * Note that a footnote in Intel's documentation says this
   5471 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5472 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5473 		 * that that is also true for the 1024ns units of the other
   5474 		 * interrupt-related timer registers -- so, really, we ought
   5475 		 * to divide this value by 4 when the link speed is low.
   5476 		 *
   5477 		 * XXX implement this division at link speed change!
   5478 		 */
   5479 
   5480 		/*
   5481 		 * For N interrupts/sec, set this value to:
   5482 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5483 		 * absolute and packet timer values to this value
   5484 		 * divided by 4 to get "simple timer" behavior.
   5485 		 */
   5486 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
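         		/* 2604 follows from 1,000,000,000 / (1500 * 256). */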
   5487 	}
   5488 
   5489 	error = wm_init_txrx_queues(sc);
   5490 	if (error)
   5491 		goto out;
   5492 
   5493 	/*
   5494 	 * Clear out the VLAN table -- we don't use it (yet).
   5495 	 */
   5496 	CSR_WRITE(sc, WMREG_VET, 0);
   5497 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5498 		trynum = 10; /* Due to hw errata */
   5499 	else
   5500 		trynum = 1;
   5501 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5502 		for (j = 0; j < trynum; j++)
   5503 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5504 
   5505 	/*
   5506 	 * Set up flow-control parameters.
   5507 	 *
   5508 	 * XXX Values could probably stand some tuning.
   5509 	 */
   5510 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5511 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5512 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5513 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5514 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5515 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5516 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5517 	}
   5518 
   5519 	sc->sc_fcrtl = FCRTL_DFLT;
   5520 	if (sc->sc_type < WM_T_82543) {
   5521 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5522 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5523 	} else {
   5524 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5525 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5526 	}
   5527 
   5528 	if (sc->sc_type == WM_T_80003)
   5529 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5530 	else
   5531 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5532 
    5533 	/* wm_set_vlan() writes the control register. */
   5534 	wm_set_vlan(sc);
   5535 
   5536 	if (sc->sc_flags & WM_F_HAS_MII) {
   5537 		uint16_t kmreg;
   5538 
   5539 		switch (sc->sc_type) {
   5540 		case WM_T_80003:
   5541 		case WM_T_ICH8:
   5542 		case WM_T_ICH9:
   5543 		case WM_T_ICH10:
   5544 		case WM_T_PCH:
   5545 		case WM_T_PCH2:
   5546 		case WM_T_PCH_LPT:
   5547 		case WM_T_PCH_SPT:
   5548 		case WM_T_PCH_CNP:
   5549 			/*
    5550 			 * Set the MAC to wait the maximum time between
    5551 			 * iterations and increase the max iterations when
    5552 			 * polling the PHY; this fixes erroneous timeouts at
   5553 			 * 10Mbps.
   5554 			 */
   5555 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5556 			    0xFFFF);
   5557 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5558 			    &kmreg);
   5559 			kmreg |= 0x3F;
   5560 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5561 			    kmreg);
   5562 			break;
   5563 		default:
   5564 			break;
   5565 		}
   5566 
   5567 		if (sc->sc_type == WM_T_80003) {
   5568 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5569 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5570 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5571 
   5572 			/* Bypass RX and TX FIFO's */
   5573 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5574 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5575 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5576 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5577 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5578 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5579 		}
   5580 	}
   5581 #if 0
   5582 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5583 #endif
   5584 
   5585 	/* Set up checksum offload parameters. */
   5586 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5587 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5588 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5589 		reg |= RXCSUM_IPOFL;
   5590 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5591 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5592 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5593 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5594 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5595 
    5596 	/* Set up the MSI-X related registers */
   5597 	if (wm_is_using_msix(sc)) {
   5598 		uint32_t ivar;
   5599 		struct wm_queue *wmq;
   5600 		int qid, qintr_idx;
   5601 
   5602 		if (sc->sc_type == WM_T_82575) {
   5603 			/* Interrupt control */
   5604 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5605 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5606 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5607 
   5608 			/* TX and RX */
   5609 			for (i = 0; i < sc->sc_nqueues; i++) {
   5610 				wmq = &sc->sc_queue[i];
   5611 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5612 				    EITR_TX_QUEUE(wmq->wmq_id)
   5613 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5614 			}
   5615 			/* Link status */
   5616 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5617 			    EITR_OTHER);
   5618 		} else if (sc->sc_type == WM_T_82574) {
   5619 			/* Interrupt control */
   5620 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5621 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5622 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5623 
   5624 			/*
    5625 			 * Work around an issue with spurious interrupts
    5626 			 * in MSI-X mode.
    5627 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5628 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5629 			 */
   5630 			reg = CSR_READ(sc, WMREG_RFCTL);
   5631 			reg |= WMREG_RFCTL_ACKDIS;
   5632 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5633 
   5634 			ivar = 0;
   5635 			/* TX and RX */
   5636 			for (i = 0; i < sc->sc_nqueues; i++) {
   5637 				wmq = &sc->sc_queue[i];
   5638 				qid = wmq->wmq_id;
   5639 				qintr_idx = wmq->wmq_intr_idx;
   5640 
   5641 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5642 				    IVAR_TX_MASK_Q_82574(qid));
   5643 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5644 				    IVAR_RX_MASK_Q_82574(qid));
   5645 			}
   5646 			/* Link status */
   5647 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5648 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5649 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5650 		} else {
   5651 			/* Interrupt control */
   5652 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5653 			    | GPIE_EIAME | GPIE_PBA);
   5654 
   5655 			switch (sc->sc_type) {
   5656 			case WM_T_82580:
   5657 			case WM_T_I350:
   5658 			case WM_T_I354:
   5659 			case WM_T_I210:
   5660 			case WM_T_I211:
   5661 				/* TX and RX */
   5662 				for (i = 0; i < sc->sc_nqueues; i++) {
   5663 					wmq = &sc->sc_queue[i];
   5664 					qid = wmq->wmq_id;
   5665 					qintr_idx = wmq->wmq_intr_idx;
   5666 
   5667 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5668 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5669 					ivar |= __SHIFTIN((qintr_idx
   5670 						| IVAR_VALID),
   5671 					    IVAR_TX_MASK_Q(qid));
   5672 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5673 					ivar |= __SHIFTIN((qintr_idx
   5674 						| IVAR_VALID),
   5675 					    IVAR_RX_MASK_Q(qid));
   5676 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5677 				}
   5678 				break;
   5679 			case WM_T_82576:
   5680 				/* TX and RX */
   5681 				for (i = 0; i < sc->sc_nqueues; i++) {
   5682 					wmq = &sc->sc_queue[i];
   5683 					qid = wmq->wmq_id;
   5684 					qintr_idx = wmq->wmq_intr_idx;
   5685 
   5686 					ivar = CSR_READ(sc,
   5687 					    WMREG_IVAR_Q_82576(qid));
   5688 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5689 					ivar |= __SHIFTIN((qintr_idx
   5690 						| IVAR_VALID),
   5691 					    IVAR_TX_MASK_Q_82576(qid));
   5692 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5693 					ivar |= __SHIFTIN((qintr_idx
   5694 						| IVAR_VALID),
   5695 					    IVAR_RX_MASK_Q_82576(qid));
   5696 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5697 					    ivar);
   5698 				}
   5699 				break;
   5700 			default:
   5701 				break;
   5702 			}
   5703 
   5704 			/* Link status */
   5705 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5706 			    IVAR_MISC_OTHER);
   5707 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5708 		}
   5709 
   5710 		if (wm_is_using_multiqueue(sc)) {
   5711 			wm_init_rss(sc);
   5712 
   5713 			/*
    5714 			 * NOTE: Receive Full-Packet Checksum Offload is
    5715 			 * mutually exclusive with Multiqueue. However, this
    5716 			 * is not the same as TCP/IP checksums, which still
    5717 			 * work.
    5718 			 */
   5719 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5720 			reg |= RXCSUM_PCSD;
   5721 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5722 		}
   5723 	}
   5724 
   5725 	/* Set up the interrupt registers. */
   5726 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5727 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5728 	    ICR_RXO | ICR_RXT0;
   5729 	if (wm_is_using_msix(sc)) {
   5730 		uint32_t mask;
   5731 		struct wm_queue *wmq;
   5732 
   5733 		switch (sc->sc_type) {
   5734 		case WM_T_82574:
   5735 			mask = 0;
   5736 			for (i = 0; i < sc->sc_nqueues; i++) {
   5737 				wmq = &sc->sc_queue[i];
   5738 				mask |= ICR_TXQ(wmq->wmq_id);
   5739 				mask |= ICR_RXQ(wmq->wmq_id);
   5740 			}
   5741 			mask |= ICR_OTHER;
   5742 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5743 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5744 			break;
   5745 		default:
   5746 			if (sc->sc_type == WM_T_82575) {
   5747 				mask = 0;
   5748 				for (i = 0; i < sc->sc_nqueues; i++) {
   5749 					wmq = &sc->sc_queue[i];
   5750 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5751 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5752 				}
   5753 				mask |= EITR_OTHER;
   5754 			} else {
   5755 				mask = 0;
   5756 				for (i = 0; i < sc->sc_nqueues; i++) {
   5757 					wmq = &sc->sc_queue[i];
   5758 					mask |= 1 << wmq->wmq_intr_idx;
   5759 				}
   5760 				mask |= 1 << sc->sc_link_intr_idx;
   5761 			}
   5762 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5763 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5764 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5765 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5766 			break;
   5767 		}
   5768 	} else
   5769 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5770 
   5771 	/* Set up the inter-packet gap. */
   5772 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5773 
   5774 	if (sc->sc_type >= WM_T_82543) {
   5775 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5776 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5777 			wm_itrs_writereg(sc, wmq);
   5778 		}
   5779 		/*
   5780 		 * Link interrupts occur much less than TX
   5781 		 * interrupts and RX interrupts. So, we don't
   5782 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5783 		 * FreeBSD's if_igb.
   5784 		 */
   5785 	}
   5786 
   5787 	/* Set the VLAN ethernetype. */
   5788 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5789 
   5790 	/*
   5791 	 * Set up the transmit control register; we start out with
    5792 	 * a collision distance suitable for FDX, but update it when
   5793 	 * we resolve the media type.
   5794 	 */
   5795 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5796 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5797 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5798 	if (sc->sc_type >= WM_T_82571)
   5799 		sc->sc_tctl |= TCTL_MULR;
   5800 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5801 
   5802 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5803 		/* Write TDT after TCTL.EN is set. See the document. */
   5804 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5805 	}
   5806 
   5807 	if (sc->sc_type == WM_T_80003) {
   5808 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5809 		reg &= ~TCTL_EXT_GCEX_MASK;
   5810 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5811 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5812 	}
   5813 
   5814 	/* Set the media. */
   5815 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5816 		goto out;
   5817 
   5818 	/* Configure for OS presence */
   5819 	wm_init_manageability(sc);
   5820 
   5821 	/*
   5822 	 * Set up the receive control register; we actually program
   5823 	 * the register when we set the receive filter.  Use multicast
   5824 	 * address offset type 0.
   5825 	 *
   5826 	 * Only the i82544 has the ability to strip the incoming
   5827 	 * CRC, so we don't enable that feature.
   5828 	 */
   5829 	sc->sc_mchash_type = 0;
   5830 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5831 	    | RCTL_MO(sc->sc_mchash_type);
   5832 
   5833 	/*
    5834 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5835 	 */
   5836 	if (sc->sc_type == WM_T_82574)
   5837 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5838 
   5839 	/*
   5840 	 * The I350 has a bug where it always strips the CRC whether
    5841 	 * asked to or not, so ask for a stripped CRC here and cope with it in rxeof.
   5842 	 */
   5843 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5844 	    || (sc->sc_type == WM_T_I210))
   5845 		sc->sc_rctl |= RCTL_SECRC;
   5846 
   5847 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5848 	    && (ifp->if_mtu > ETHERMTU)) {
   5849 		sc->sc_rctl |= RCTL_LPE;
   5850 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5851 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5852 	}
   5853 
   5854 	if (MCLBYTES == 2048) {
   5855 		sc->sc_rctl |= RCTL_2k;
   5856 	} else {
   5857 		if (sc->sc_type >= WM_T_82543) {
   5858 			switch (MCLBYTES) {
   5859 			case 4096:
   5860 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5861 				break;
   5862 			case 8192:
   5863 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5864 				break;
   5865 			case 16384:
   5866 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5867 				break;
   5868 			default:
   5869 				panic("wm_init: MCLBYTES %d unsupported",
   5870 				    MCLBYTES);
   5871 				break;
   5872 			}
   5873 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5874 	}
   5875 
   5876 	/* Enable ECC */
   5877 	switch (sc->sc_type) {
   5878 	case WM_T_82571:
   5879 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5880 		reg |= PBA_ECC_CORR_EN;
   5881 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5882 		break;
   5883 	case WM_T_PCH_LPT:
   5884 	case WM_T_PCH_SPT:
   5885 	case WM_T_PCH_CNP:
   5886 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5887 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5888 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5889 
   5890 		sc->sc_ctrl |= CTRL_MEHE;
   5891 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5892 		break;
   5893 	default:
   5894 		break;
   5895 	}
   5896 
   5897 	/*
   5898 	 * Set the receive filter.
   5899 	 *
   5900 	 * For 82575 and 82576, the RX descriptors must be initialized after
    5901 	 * the setting of RCTL.EN in wm_set_filter().
   5902 	 */
   5903 	wm_set_filter(sc);
   5904 
    5905 	/* On the 82575 and later, set RDT only if RX is enabled */
   5906 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5907 		int qidx;
   5908 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5909 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5910 			for (i = 0; i < WM_NRXDESC; i++) {
   5911 				mutex_enter(rxq->rxq_lock);
   5912 				wm_init_rxdesc(rxq, i);
   5913 				mutex_exit(rxq->rxq_lock);
   5914 
   5915 			}
   5916 		}
   5917 	}
   5918 
   5919 	wm_unset_stopping_flags(sc);
   5920 
   5921 	/* Start the one second link check clock. */
   5922 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5923 
   5924 	/* ...all done! */
   5925 	ifp->if_flags |= IFF_RUNNING;
   5926 	ifp->if_flags &= ~IFF_OACTIVE;
   5927 
   5928  out:
   5929 	sc->sc_if_flags = ifp->if_flags;
   5930 	if (error)
   5931 		log(LOG_ERR, "%s: interface not running\n",
   5932 		    device_xname(sc->sc_dev));
   5933 	return error;
   5934 }
   5935 
   5936 /*
   5937  * wm_stop:		[ifnet interface function]
   5938  *
   5939  *	Stop transmission on the interface.
   5940  */
   5941 static void
   5942 wm_stop(struct ifnet *ifp, int disable)
   5943 {
   5944 	struct wm_softc *sc = ifp->if_softc;
   5945 
   5946 	WM_CORE_LOCK(sc);
   5947 	wm_stop_locked(ifp, disable);
   5948 	WM_CORE_UNLOCK(sc);
   5949 }
   5950 
   5951 static void
   5952 wm_stop_locked(struct ifnet *ifp, int disable)
   5953 {
   5954 	struct wm_softc *sc = ifp->if_softc;
   5955 	struct wm_txsoft *txs;
   5956 	int i, qidx;
   5957 
   5958 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5959 		device_xname(sc->sc_dev), __func__));
   5960 	KASSERT(WM_CORE_LOCKED(sc));
   5961 
   5962 	wm_set_stopping_flags(sc);
   5963 
   5964 	/* Stop the one second clock. */
   5965 	callout_stop(&sc->sc_tick_ch);
   5966 
   5967 	/* Stop the 82547 Tx FIFO stall check timer. */
   5968 	if (sc->sc_type == WM_T_82547)
   5969 		callout_stop(&sc->sc_txfifo_ch);
   5970 
   5971 	if (sc->sc_flags & WM_F_HAS_MII) {
   5972 		/* Down the MII. */
   5973 		mii_down(&sc->sc_mii);
   5974 	} else {
   5975 #if 0
   5976 		/* Should we clear PHY's status properly? */
   5977 		wm_reset(sc);
   5978 #endif
   5979 	}
   5980 
   5981 	/* Stop the transmit and receive processes. */
   5982 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5983 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5984 	sc->sc_rctl &= ~RCTL_EN;
   5985 
   5986 	/*
   5987 	 * Clear the interrupt mask to ensure the device cannot assert its
   5988 	 * interrupt line.
   5989 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5990 	 * service any currently pending or shared interrupt.
   5991 	 */
   5992 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5993 	sc->sc_icr = 0;
   5994 	if (wm_is_using_msix(sc)) {
   5995 		if (sc->sc_type != WM_T_82574) {
   5996 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5997 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5998 		} else
   5999 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6000 	}
   6001 
   6002 	/* Release any queued transmit buffers. */
   6003 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6004 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6005 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6006 		mutex_enter(txq->txq_lock);
   6007 		txq->txq_watchdog = false; /* ensure watchdog disabled */
   6008 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6009 			txs = &txq->txq_soft[i];
   6010 			if (txs->txs_mbuf != NULL) {
   6011 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6012 				m_freem(txs->txs_mbuf);
   6013 				txs->txs_mbuf = NULL;
   6014 			}
   6015 		}
   6016 		mutex_exit(txq->txq_lock);
   6017 	}
   6018 
   6019 	/* Mark the interface as down and cancel the watchdog timer. */
   6020 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6021 
   6022 	if (disable) {
   6023 		for (i = 0; i < sc->sc_nqueues; i++) {
   6024 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6025 			mutex_enter(rxq->rxq_lock);
   6026 			wm_rxdrain(rxq);
   6027 			mutex_exit(rxq->rxq_lock);
   6028 		}
   6029 	}
   6030 
   6031 #if 0 /* notyet */
   6032 	if (sc->sc_type >= WM_T_82544)
   6033 		CSR_WRITE(sc, WMREG_WUC, 0);
   6034 #endif
   6035 }
   6036 
   6037 static void
   6038 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6039 {
   6040 	struct mbuf *m;
   6041 	int i;
   6042 
   6043 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6044 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6045 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6046 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6047 		    m->m_data, m->m_len, m->m_flags);
   6048 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6049 	    i, i == 1 ? "" : "s");
   6050 }
   6051 
   6052 /*
   6053  * wm_82547_txfifo_stall:
   6054  *
   6055  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6056  *	reset the FIFO pointers, and restart packet transmission.
   6057  */
   6058 static void
   6059 wm_82547_txfifo_stall(void *arg)
   6060 {
   6061 	struct wm_softc *sc = arg;
   6062 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6063 
   6064 	mutex_enter(txq->txq_lock);
   6065 
   6066 	if (txq->txq_stopping)
   6067 		goto out;
   6068 
   6069 	if (txq->txq_fifo_stall) {
   6070 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6071 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6072 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6073 			/*
   6074 			 * Packets have drained.  Stop transmitter, reset
   6075 			 * FIFO pointers, restart transmitter, and kick
   6076 			 * the packet queue.
   6077 			 */
   6078 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6079 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6080 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6081 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6082 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6083 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6084 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6085 			CSR_WRITE_FLUSH(sc);
   6086 
   6087 			txq->txq_fifo_head = 0;
   6088 			txq->txq_fifo_stall = 0;
   6089 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6090 		} else {
   6091 			/*
   6092 			 * Still waiting for packets to drain; try again in
   6093 			 * another tick.
   6094 			 */
   6095 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6096 		}
   6097 	}
   6098 
   6099 out:
   6100 	mutex_exit(txq->txq_lock);
   6101 }
   6102 
   6103 /*
   6104  * wm_82547_txfifo_bugchk:
   6105  *
    6106  *	Check for the bug condition in the 82547 Tx FIFO.  We need to
    6107  *	prevent enqueueing a packet that would wrap around the end
    6108  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6109  *
   6110  *	We do this by checking the amount of space before the end
   6111  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6112  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6113  *	the internal FIFO pointers to the beginning, and restart
   6114  *	transmission on the interface.
   6115  */
   6116 #define	WM_FIFO_HDR		0x10
   6117 #define	WM_82547_PAD_LEN	0x3e0
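         /*
          * Worked example: a full-sized 1514-byte frame consumes
          * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of
          * FIFO space, so in half-duplex mode it stalls the queue whenever
          * space <= len - WM_82547_PAD_LEN, i.e. when 544 bytes or fewer
          * remain before the end of the FIFO.
          */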
   6118 static int
   6119 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6120 {
   6121 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6122 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6123 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6124 
   6125 	/* Just return if already stalled. */
   6126 	if (txq->txq_fifo_stall)
   6127 		return 1;
   6128 
   6129 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6130 		/* Stall only occurs in half-duplex mode. */
   6131 		goto send_packet;
   6132 	}
   6133 
   6134 	if (len >= WM_82547_PAD_LEN + space) {
   6135 		txq->txq_fifo_stall = 1;
   6136 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6137 		return 1;
   6138 	}
   6139 
   6140  send_packet:
   6141 	txq->txq_fifo_head += len;
   6142 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6143 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6144 
   6145 	return 0;
   6146 }
   6147 
   6148 static int
   6149 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6150 {
   6151 	int error;
   6152 
   6153 	/*
   6154 	 * Allocate the control data structures, and create and load the
   6155 	 * DMA map for it.
   6156 	 *
   6157 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6158 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6159 	 * both sets within the same 4G segment.
   6160 	 */
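         	/*
         	 * The 4G constraint is enforced by the 0x100000000 boundary
         	 * argument passed to bus_dmamem_alloc() below.
         	 */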
   6161 	if (sc->sc_type < WM_T_82544)
   6162 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6163 	else
   6164 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6165 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6166 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6167 	else
   6168 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6169 
   6170 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6171 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6172 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6173 		aprint_error_dev(sc->sc_dev,
   6174 		    "unable to allocate TX control data, error = %d\n",
   6175 		    error);
   6176 		goto fail_0;
   6177 	}
   6178 
   6179 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6180 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6181 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6182 		aprint_error_dev(sc->sc_dev,
   6183 		    "unable to map TX control data, error = %d\n", error);
   6184 		goto fail_1;
   6185 	}
   6186 
   6187 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6188 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6189 		aprint_error_dev(sc->sc_dev,
   6190 		    "unable to create TX control data DMA map, error = %d\n",
   6191 		    error);
   6192 		goto fail_2;
   6193 	}
   6194 
   6195 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6196 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6197 		aprint_error_dev(sc->sc_dev,
   6198 		    "unable to load TX control data DMA map, error = %d\n",
   6199 		    error);
   6200 		goto fail_3;
   6201 	}
   6202 
   6203 	return 0;
   6204 
   6205  fail_3:
   6206 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6207  fail_2:
   6208 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6209 	    WM_TXDESCS_SIZE(txq));
   6210  fail_1:
   6211 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6212  fail_0:
   6213 	return error;
   6214 }
   6215 
   6216 static void
   6217 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6218 {
   6219 
   6220 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6221 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6222 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6223 	    WM_TXDESCS_SIZE(txq));
   6224 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6225 }
   6226 
   6227 static int
   6228 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6229 {
   6230 	int error;
   6231 	size_t rxq_descs_size;
   6232 
   6233 	/*
   6234 	 * Allocate the control data structures, and create and load the
   6235 	 * DMA map for it.
   6236 	 *
   6237 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6238 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6239 	 * both sets within the same 4G segment.
   6240 	 */
   6241 	rxq->rxq_ndesc = WM_NRXDESC;
   6242 	if (sc->sc_type == WM_T_82574)
   6243 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6244 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6245 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6246 	else
   6247 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6248 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6249 
   6250 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6251 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6252 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6253 		aprint_error_dev(sc->sc_dev,
   6254 		    "unable to allocate RX control data, error = %d\n",
   6255 		    error);
   6256 		goto fail_0;
   6257 	}
   6258 
   6259 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6260 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6261 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6262 		aprint_error_dev(sc->sc_dev,
   6263 		    "unable to map RX control data, error = %d\n", error);
   6264 		goto fail_1;
   6265 	}
   6266 
   6267 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6268 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6269 		aprint_error_dev(sc->sc_dev,
   6270 		    "unable to create RX control data DMA map, error = %d\n",
   6271 		    error);
   6272 		goto fail_2;
   6273 	}
   6274 
   6275 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6276 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6277 		aprint_error_dev(sc->sc_dev,
   6278 		    "unable to load RX control data DMA map, error = %d\n",
   6279 		    error);
   6280 		goto fail_3;
   6281 	}
   6282 
   6283 	return 0;
   6284 
   6285  fail_3:
   6286 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6287  fail_2:
   6288 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6289 	    rxq_descs_size);
   6290  fail_1:
   6291 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6292  fail_0:
   6293 	return error;
   6294 }
   6295 
   6296 static void
   6297 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6298 {
   6299 
   6300 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6301 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6302 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6303 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6304 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6305 }
   6306 
   6307 
   6308 static int
   6309 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6310 {
   6311 	int i, error;
   6312 
   6313 	/* Create the transmit buffer DMA maps. */
   6314 	WM_TXQUEUELEN(txq) =
   6315 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6316 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6317 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6318 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6319 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6320 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6321 			aprint_error_dev(sc->sc_dev,
   6322 			    "unable to create Tx DMA map %d, error = %d\n",
   6323 			    i, error);
   6324 			goto fail;
   6325 		}
   6326 	}
   6327 
   6328 	return 0;
   6329 
   6330  fail:
   6331 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6332 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6333 			bus_dmamap_destroy(sc->sc_dmat,
   6334 			    txq->txq_soft[i].txs_dmamap);
   6335 	}
   6336 	return error;
   6337 }
   6338 
   6339 static void
   6340 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6341 {
   6342 	int i;
   6343 
   6344 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6345 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6346 			bus_dmamap_destroy(sc->sc_dmat,
   6347 			    txq->txq_soft[i].txs_dmamap);
   6348 	}
   6349 }
   6350 
   6351 static int
   6352 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6353 {
   6354 	int i, error;
   6355 
   6356 	/* Create the receive buffer DMA maps. */
   6357 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6358 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6359 			    MCLBYTES, 0, 0,
   6360 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6361 			aprint_error_dev(sc->sc_dev,
    6362 			    "unable to create Rx DMA map %d, error = %d\n",
   6363 			    i, error);
   6364 			goto fail;
   6365 		}
   6366 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6367 	}
   6368 
   6369 	return 0;
   6370 
   6371  fail:
   6372 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6373 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6374 			bus_dmamap_destroy(sc->sc_dmat,
   6375 			    rxq->rxq_soft[i].rxs_dmamap);
   6376 	}
   6377 	return error;
   6378 }
   6379 
   6380 static void
   6381 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6382 {
   6383 	int i;
   6384 
   6385 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6386 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6387 			bus_dmamap_destroy(sc->sc_dmat,
   6388 			    rxq->rxq_soft[i].rxs_dmamap);
   6389 	}
   6390 }
   6391 
   6392 /*
    6393  * wm_alloc_txrx_queues:
   6394  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6395  */
   6396 static int
   6397 wm_alloc_txrx_queues(struct wm_softc *sc)
   6398 {
   6399 	int i, error, tx_done, rx_done;
   6400 
   6401 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6402 	    KM_SLEEP);
   6403 	if (sc->sc_queue == NULL) {
    6404 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6405 		error = ENOMEM;
   6406 		goto fail_0;
   6407 	}
   6408 
   6409 	/*
   6410 	 * For transmission
   6411 	 */
   6412 	error = 0;
   6413 	tx_done = 0;
   6414 	for (i = 0; i < sc->sc_nqueues; i++) {
   6415 #ifdef WM_EVENT_COUNTERS
   6416 		int j;
   6417 		const char *xname;
   6418 #endif
   6419 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6420 		txq->txq_sc = sc;
   6421 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6422 
   6423 		error = wm_alloc_tx_descs(sc, txq);
   6424 		if (error)
   6425 			break;
   6426 		error = wm_alloc_tx_buffer(sc, txq);
   6427 		if (error) {
   6428 			wm_free_tx_descs(sc, txq);
   6429 			break;
   6430 		}
   6431 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6432 		if (txq->txq_interq == NULL) {
   6433 			wm_free_tx_descs(sc, txq);
   6434 			wm_free_tx_buffer(sc, txq);
   6435 			error = ENOMEM;
   6436 			break;
   6437 		}
   6438 
   6439 #ifdef WM_EVENT_COUNTERS
   6440 		xname = device_xname(sc->sc_dev);
   6441 
   6442 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6443 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6444 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6445 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6446 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6447 
   6448 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6449 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6450 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6451 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6452 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6453 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6454 
   6455 		for (j = 0; j < WM_NTXSEGS; j++) {
   6456 			snprintf(txq->txq_txseg_evcnt_names[j],
   6457 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6458 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6459 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6460 		}
   6461 
   6462 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6463 
   6464 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6465 #endif /* WM_EVENT_COUNTERS */
   6466 
   6467 		tx_done++;
   6468 	}
   6469 	if (error)
   6470 		goto fail_1;
   6471 
   6472 	/*
    6473 	 * For receive
   6474 	 */
   6475 	error = 0;
   6476 	rx_done = 0;
   6477 	for (i = 0; i < sc->sc_nqueues; i++) {
   6478 #ifdef WM_EVENT_COUNTERS
   6479 		const char *xname;
   6480 #endif
   6481 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6482 		rxq->rxq_sc = sc;
   6483 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6484 
   6485 		error = wm_alloc_rx_descs(sc, rxq);
   6486 		if (error)
   6487 			break;
   6488 
   6489 		error = wm_alloc_rx_buffer(sc, rxq);
   6490 		if (error) {
   6491 			wm_free_rx_descs(sc, rxq);
   6492 			break;
   6493 		}
   6494 
   6495 #ifdef WM_EVENT_COUNTERS
   6496 		xname = device_xname(sc->sc_dev);
   6497 
   6498 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6499 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxdefer, rxq, i, xname);
   6500 
   6501 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6502 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6503 #endif /* WM_EVENT_COUNTERS */
   6504 
   6505 		rx_done++;
   6506 	}
   6507 	if (error)
   6508 		goto fail_2;
   6509 
   6510 	return 0;
   6511 
   6512  fail_2:
   6513 	for (i = 0; i < rx_done; i++) {
   6514 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6515 		wm_free_rx_buffer(sc, rxq);
   6516 		wm_free_rx_descs(sc, rxq);
   6517 		if (rxq->rxq_lock)
   6518 			mutex_obj_free(rxq->rxq_lock);
   6519 	}
   6520  fail_1:
   6521 	for (i = 0; i < tx_done; i++) {
   6522 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6523 		pcq_destroy(txq->txq_interq);
   6524 		wm_free_tx_buffer(sc, txq);
   6525 		wm_free_tx_descs(sc, txq);
   6526 		if (txq->txq_lock)
   6527 			mutex_obj_free(txq->txq_lock);
   6528 	}
   6529 
   6530 	kmem_free(sc->sc_queue,
   6531 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6532  fail_0:
   6533 	return error;
   6534 }
   6535 
   6536 /*
    6537  * wm_free_txrx_queues:
   6538  *	Free {tx,rx}descs and {tx,rx} buffers
   6539  */
   6540 static void
   6541 wm_free_txrx_queues(struct wm_softc *sc)
   6542 {
   6543 	int i;
   6544 
   6545 	for (i = 0; i < sc->sc_nqueues; i++) {
   6546 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6547 
   6548 #ifdef WM_EVENT_COUNTERS
   6549 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6550 		WM_Q_EVCNT_DETACH(rxq, rxdefer, rxq, i);
   6551 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6552 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6553 #endif /* WM_EVENT_COUNTERS */
   6554 
   6555 		wm_free_rx_buffer(sc, rxq);
   6556 		wm_free_rx_descs(sc, rxq);
   6557 		if (rxq->rxq_lock)
   6558 			mutex_obj_free(rxq->rxq_lock);
   6559 	}
   6560 
   6561 	for (i = 0; i < sc->sc_nqueues; i++) {
   6562 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6563 		struct mbuf *m;
   6564 #ifdef WM_EVENT_COUNTERS
   6565 		int j;
   6566 
   6567 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6568 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6569 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6570 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6571 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6572 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6573 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6574 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6575 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6576 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6577 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6578 
   6579 		for (j = 0; j < WM_NTXSEGS; j++)
   6580 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6581 
   6582 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6583 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6584 #endif /* WM_EVENT_COUNTERS */
   6585 
   6586 		/* drain txq_interq */
   6587 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6588 			m_freem(m);
   6589 		pcq_destroy(txq->txq_interq);
   6590 
   6591 		wm_free_tx_buffer(sc, txq);
   6592 		wm_free_tx_descs(sc, txq);
   6593 		if (txq->txq_lock)
   6594 			mutex_obj_free(txq->txq_lock);
   6595 	}
   6596 
   6597 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6598 }
   6599 
   6600 static void
   6601 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6602 {
   6603 
   6604 	KASSERT(mutex_owned(txq->txq_lock));
   6605 
   6606 	/* Initialize the transmit descriptor ring. */
   6607 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6608 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6609 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6610 	txq->txq_free = WM_NTXDESC(txq);
   6611 	txq->txq_next = 0;
   6612 }
   6613 
   6614 static void
   6615 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6616     struct wm_txqueue *txq)
   6617 {
   6618 
   6619 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6620 		device_xname(sc->sc_dev), __func__));
   6621 	KASSERT(mutex_owned(txq->txq_lock));
   6622 
   6623 	if (sc->sc_type < WM_T_82543) {
   6624 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6625 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6626 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6627 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6628 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6629 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6630 	} else {
   6631 		int qid = wmq->wmq_id;
   6632 
   6633 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6634 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6635 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6636 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6637 
   6638 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6639 			/*
   6640 			 * Don't write TDT before TCTL.EN is set.
    6641 			 * See the documentation.
   6642 			 */
   6643 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6644 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6645 			    | TXDCTL_WTHRESH(0));
   6646 		else {
   6647 			/* XXX should update with AIM? */
   6648 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6649 			if (sc->sc_type >= WM_T_82540) {
    6650 				/* Should be the same value as TIDV. */
   6651 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6652 			}
   6653 
   6654 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6655 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6656 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6657 		}
   6658 	}
   6659 }
   6660 
   6661 static void
   6662 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6663 {
   6664 	int i;
   6665 
   6666 	KASSERT(mutex_owned(txq->txq_lock));
   6667 
   6668 	/* Initialize the transmit job descriptors. */
   6669 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6670 		txq->txq_soft[i].txs_mbuf = NULL;
   6671 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6672 	txq->txq_snext = 0;
   6673 	txq->txq_sdirty = 0;
   6674 }
   6675 
   6676 static void
   6677 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6678     struct wm_txqueue *txq)
   6679 {
   6680 
   6681 	KASSERT(mutex_owned(txq->txq_lock));
   6682 
   6683 	/*
   6684 	 * Set up some register offsets that are different between
   6685 	 * the i82542 and the i82543 and later chips.
   6686 	 */
   6687 	if (sc->sc_type < WM_T_82543)
   6688 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6689 	else
   6690 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6691 
   6692 	wm_init_tx_descs(sc, txq);
   6693 	wm_init_tx_regs(sc, wmq, txq);
   6694 	wm_init_tx_buffer(sc, txq);
   6695 
   6696 	txq->txq_watchdog = false;
   6697 }
   6698 
   6699 static void
   6700 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6701     struct wm_rxqueue *rxq)
   6702 {
   6703 
   6704 	KASSERT(mutex_owned(rxq->rxq_lock));
   6705 
   6706 	/*
   6707 	 * Initialize the receive descriptor and receive job
   6708 	 * descriptor rings.
   6709 	 */
   6710 	if (sc->sc_type < WM_T_82543) {
   6711 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6712 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6713 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6714 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6715 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6716 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6717 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6718 
   6719 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6720 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6721 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6722 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6723 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6724 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6725 	} else {
   6726 		int qid = wmq->wmq_id;
   6727 
   6728 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6729 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6730 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6731 
   6732 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6733 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6734 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6735 
    6736 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6737 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6738 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
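         			/*
         			 * E.g., assuming the usual MCLBYTES of 2048 and
         			 * a 1KB BSIZEPKT granularity (a shift of 10),
         			 * the write above programs a 2KB receive packet
         			 * buffer per descriptor.
         			 */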
   6739 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6740 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6741 			    | RXDCTL_WTHRESH(1));
   6742 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6743 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6744 		} else {
   6745 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6746 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6747 			/* XXX should update with AIM? */
   6748 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6749 			/* MUST be the same value as RDTR. */
   6750 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6751 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6752 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6753 		}
   6754 	}
   6755 }
   6756 
   6757 static int
   6758 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6759 {
   6760 	struct wm_rxsoft *rxs;
   6761 	int error, i;
   6762 
   6763 	KASSERT(mutex_owned(rxq->rxq_lock));
   6764 
   6765 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6766 		rxs = &rxq->rxq_soft[i];
   6767 		if (rxs->rxs_mbuf == NULL) {
   6768 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6769 				log(LOG_ERR, "%s: unable to allocate or map "
   6770 				    "rx buffer %d, error = %d\n",
   6771 				    device_xname(sc->sc_dev), i, error);
   6772 				/*
   6773 				 * XXX Should attempt to run with fewer receive
   6774 				 * XXX buffers instead of just failing.
   6775 				 */
   6776 				wm_rxdrain(rxq);
   6777 				return ENOMEM;
   6778 			}
   6779 		} else {
   6780 			/*
   6781 			 * For 82575 and 82576, the RX descriptors must be
   6782 			 * initialized after the setting of RCTL.EN in
   6783 			 * wm_set_filter()
   6784 			 */
   6785 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6786 				wm_init_rxdesc(rxq, i);
   6787 		}
   6788 	}
   6789 	rxq->rxq_ptr = 0;
   6790 	rxq->rxq_discard = 0;
   6791 	WM_RXCHAIN_RESET(rxq);
   6792 
   6793 	return 0;
   6794 }
   6795 
   6796 static int
   6797 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6798     struct wm_rxqueue *rxq)
   6799 {
   6800 
   6801 	KASSERT(mutex_owned(rxq->rxq_lock));
   6802 
   6803 	/*
   6804 	 * Set up some register offsets that are different between
   6805 	 * the i82542 and the i82543 and later chips.
   6806 	 */
   6807 	if (sc->sc_type < WM_T_82543)
   6808 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6809 	else
   6810 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6811 
   6812 	wm_init_rx_regs(sc, wmq, rxq);
   6813 	return wm_init_rx_buffer(sc, rxq);
   6814 }
   6815 
   6816 /*
    6817  * wm_init_txrx_queues:
   6818  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6819  */
   6820 static int
   6821 wm_init_txrx_queues(struct wm_softc *sc)
   6822 {
   6823 	int i, error = 0;
   6824 
   6825 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6826 		device_xname(sc->sc_dev), __func__));
   6827 
   6828 	for (i = 0; i < sc->sc_nqueues; i++) {
   6829 		struct wm_queue *wmq = &sc->sc_queue[i];
   6830 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6831 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6832 
   6833 		/*
   6834 		 * TODO
    6835 		 * Currently, we use a constant value instead of AIM
    6836 		 * (adaptive interrupt moderation).  Furthermore, the interrupt
    6837 		 * interval of multiqueue devices using polling mode is shorter
    6838 		 * than the default value.  More tuning and AIM are required.
   6839 		 */
   6840 		if (wm_is_using_multiqueue(sc))
   6841 			wmq->wmq_itr = 50;
   6842 		else
   6843 			wmq->wmq_itr = sc->sc_itr_init;
   6844 		wmq->wmq_set_itr = true;
   6845 
   6846 		mutex_enter(txq->txq_lock);
   6847 		wm_init_tx_queue(sc, wmq, txq);
   6848 		mutex_exit(txq->txq_lock);
   6849 
   6850 		mutex_enter(rxq->rxq_lock);
   6851 		error = wm_init_rx_queue(sc, wmq, rxq);
   6852 		mutex_exit(rxq->rxq_lock);
   6853 		if (error)
   6854 			break;
   6855 	}
   6856 
   6857 	return error;
   6858 }
   6859 
   6860 /*
   6861  * wm_tx_offload:
   6862  *
   6863  *	Set up TCP/IP checksumming parameters for the
   6864  *	specified packet.
   6865  */
   6866 static int
   6867 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6868     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6869 {
   6870 	struct mbuf *m0 = txs->txs_mbuf;
   6871 	struct livengood_tcpip_ctxdesc *t;
   6872 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6873 	uint32_t ipcse;
   6874 	struct ether_header *eh;
   6875 	int offset, iphl;
   6876 	uint8_t fields;
   6877 
   6878 	/*
   6879 	 * XXX It would be nice if the mbuf pkthdr had offset
   6880 	 * fields for the protocol headers.
   6881 	 */
   6882 
   6883 	eh = mtod(m0, struct ether_header *);
   6884 	switch (htons(eh->ether_type)) {
   6885 	case ETHERTYPE_IP:
   6886 	case ETHERTYPE_IPV6:
   6887 		offset = ETHER_HDR_LEN;
   6888 		break;
   6889 
   6890 	case ETHERTYPE_VLAN:
   6891 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6892 		break;
   6893 
   6894 	default:
   6895 		/*
   6896 		 * Don't support this protocol or encapsulation.
   6897 		 */
   6898 		*fieldsp = 0;
   6899 		*cmdp = 0;
   6900 		return 0;
   6901 	}
   6902 
   6903 	if ((m0->m_pkthdr.csum_flags &
   6904 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6905 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6906 	} else {
   6907 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6908 	}
   6909 	ipcse = offset + iphl - 1;
   6910 
   6911 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6912 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6913 	seg = 0;
   6914 	fields = 0;
   6915 
   6916 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6917 		int hlen = offset + iphl;
   6918 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6919 
   6920 		if (__predict_false(m0->m_len <
   6921 				    (hlen + sizeof(struct tcphdr)))) {
   6922 			/*
   6923 			 * TCP/IP headers are not in the first mbuf; we need
   6924 			 * to do this the slow and painful way.  Let's just
   6925 			 * hope this doesn't happen very often.
   6926 			 */
   6927 			struct tcphdr th;
   6928 
   6929 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6930 
   6931 			m_copydata(m0, hlen, sizeof(th), &th);
   6932 			if (v4) {
   6933 				struct ip ip;
   6934 
   6935 				m_copydata(m0, offset, sizeof(ip), &ip);
   6936 				ip.ip_len = 0;
   6937 				m_copyback(m0,
   6938 				    offset + offsetof(struct ip, ip_len),
   6939 				    sizeof(ip.ip_len), &ip.ip_len);
   6940 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6941 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6942 			} else {
   6943 				struct ip6_hdr ip6;
   6944 
   6945 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6946 				ip6.ip6_plen = 0;
   6947 				m_copyback(m0,
   6948 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6949 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6950 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6951 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6952 			}
   6953 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6954 			    sizeof(th.th_sum), &th.th_sum);
   6955 
   6956 			hlen += th.th_off << 2;
   6957 		} else {
   6958 			/*
   6959 			 * TCP/IP headers are in the first mbuf; we can do
   6960 			 * this the easy way.
   6961 			 */
   6962 			struct tcphdr *th;
   6963 
   6964 			if (v4) {
   6965 				struct ip *ip =
   6966 				    (void *)(mtod(m0, char *) + offset);
   6967 				th = (void *)(mtod(m0, char *) + hlen);
   6968 
   6969 				ip->ip_len = 0;
   6970 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6971 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6972 			} else {
   6973 				struct ip6_hdr *ip6 =
   6974 				    (void *)(mtod(m0, char *) + offset);
   6975 				th = (void *)(mtod(m0, char *) + hlen);
   6976 
   6977 				ip6->ip6_plen = 0;
   6978 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6979 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6980 			}
   6981 			hlen += th->th_off << 2;
   6982 		}
   6983 
   6984 		if (v4) {
   6985 			WM_Q_EVCNT_INCR(txq, txtso);
   6986 			cmdlen |= WTX_TCPIP_CMD_IP;
   6987 		} else {
   6988 			WM_Q_EVCNT_INCR(txq, txtso6);
   6989 			ipcse = 0;
   6990 		}
   6991 		cmd |= WTX_TCPIP_CMD_TSE;
   6992 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6993 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6994 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6995 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6996 	}
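
         	/*
         	 * Worked example (hypothetical frame): with 14-byte Ethernet,
         	 * 20-byte IPv4 and 20-byte TCP headers, hlen is 54.  For a
         	 * 7354-byte TSO packet with segsz (MSS) 1460, cmdlen carries a
         	 * payload length of 7354 - 54 = 7300 and the chip emits five
         	 * 1460-byte segments, each behind a copy of the 54-byte header.
         	 */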
   6997 
   6998 	/*
   6999 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7000 	 * offload feature, if we load the context descriptor, we
   7001 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7002 	 */
   7003 
   7004 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7005 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7006 	    WTX_TCPIP_IPCSE(ipcse);
   7007 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7008 		WM_Q_EVCNT_INCR(txq, txipsum);
   7009 		fields |= WTX_IXSM;
   7010 	}
   7011 
   7012 	offset += iphl;
   7013 
   7014 	if (m0->m_pkthdr.csum_flags &
   7015 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7016 		WM_Q_EVCNT_INCR(txq, txtusum);
   7017 		fields |= WTX_TXSM;
   7018 		tucs = WTX_TCPIP_TUCSS(offset) |
   7019 		    WTX_TCPIP_TUCSO(offset +
   7020 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7021 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7022 	} else if ((m0->m_pkthdr.csum_flags &
   7023 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7024 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7025 		fields |= WTX_TXSM;
   7026 		tucs = WTX_TCPIP_TUCSS(offset) |
   7027 		    WTX_TCPIP_TUCSO(offset +
   7028 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7029 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7030 	} else {
   7031 		/* Just initialize it to a valid TCP context. */
   7032 		tucs = WTX_TCPIP_TUCSS(offset) |
   7033 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7034 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7035 	}
   7036 
   7037 	/*
    7038 	 * We don't have to write a context descriptor for every packet,
    7039 	 * except on the 82574: there we must write a context descriptor
    7040 	 * for every packet when we use two descriptor queues.
    7041 	 * Writing a context descriptor for every packet adds overhead,
    7042 	 * but it does not cause problems.
   7043 	 */
   7044 	/* Fill in the context descriptor. */
   7045 	t = (struct livengood_tcpip_ctxdesc *)
   7046 	    &txq->txq_descs[txq->txq_next];
   7047 	t->tcpip_ipcs = htole32(ipcs);
   7048 	t->tcpip_tucs = htole32(tucs);
   7049 	t->tcpip_cmdlen = htole32(cmdlen);
   7050 	t->tcpip_seg = htole32(seg);
   7051 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7052 
   7053 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7054 	txs->txs_ndesc++;
   7055 
   7056 	*cmdp = cmd;
   7057 	*fieldsp = fields;
   7058 
   7059 	return 0;
   7060 }
   7061 
   7062 static inline int
   7063 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7064 {
   7065 	struct wm_softc *sc = ifp->if_softc;
   7066 	u_int cpuid = cpu_index(curcpu());
   7067 
   7068 	/*
    7069 	 * Currently, a simple distribution strategy.
    7070 	 * TODO:
    7071 	 * distribute by flowid (RSS hash value).
   7072 	 */
    7073 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7074 }
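
         /*
          * Worked example (hypothetical values) for the mapping above: with
          * ncpu = 4, sc_nqueues = 2 and sc_affinity_offset = 1, CPUs 0..3 map
          * to queues (0+4-1)%2 = 1, then 0, 1 and 0.  Adding ncpu keeps the
          * dividend non-negative even when cpuid is smaller than the offset.
          */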
   7075 
   7076 /*
   7077  * wm_start:		[ifnet interface function]
   7078  *
   7079  *	Start packet transmission on the interface.
   7080  */
   7081 static void
   7082 wm_start(struct ifnet *ifp)
   7083 {
   7084 	struct wm_softc *sc = ifp->if_softc;
   7085 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7086 
   7087 #ifdef WM_MPSAFE
   7088 	KASSERT(if_is_mpsafe(ifp));
   7089 #endif
   7090 	/*
   7091 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7092 	 */
   7093 
   7094 	mutex_enter(txq->txq_lock);
   7095 	if (!txq->txq_stopping)
   7096 		wm_start_locked(ifp);
   7097 	mutex_exit(txq->txq_lock);
   7098 }
   7099 
   7100 static void
   7101 wm_start_locked(struct ifnet *ifp)
   7102 {
   7103 	struct wm_softc *sc = ifp->if_softc;
   7104 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7105 
   7106 	wm_send_common_locked(ifp, txq, false);
   7107 }
   7108 
   7109 static int
   7110 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7111 {
   7112 	int qid;
   7113 	struct wm_softc *sc = ifp->if_softc;
   7114 	struct wm_txqueue *txq;
   7115 
   7116 	qid = wm_select_txqueue(ifp, m);
   7117 	txq = &sc->sc_queue[qid].wmq_txq;
   7118 
   7119 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7120 		m_freem(m);
   7121 		WM_Q_EVCNT_INCR(txq, txdrop);
   7122 		return ENOBUFS;
   7123 	}
   7124 
   7125 	/*
   7126 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7127 	 */
   7128 	ifp->if_obytes += m->m_pkthdr.len;
   7129 	if (m->m_flags & M_MCAST)
   7130 		ifp->if_omcasts++;
   7131 
   7132 	if (mutex_tryenter(txq->txq_lock)) {
   7133 		if (!txq->txq_stopping)
   7134 			wm_transmit_locked(ifp, txq);
   7135 		mutex_exit(txq->txq_lock);
   7136 	}
   7137 
   7138 	return 0;
   7139 }
   7140 
   7141 static void
   7142 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7143 {
   7144 
   7145 	wm_send_common_locked(ifp, txq, true);
   7146 }
   7147 
   7148 static void
   7149 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7150     bool is_transmit)
   7151 {
   7152 	struct wm_softc *sc = ifp->if_softc;
   7153 	struct mbuf *m0;
   7154 	struct wm_txsoft *txs;
   7155 	bus_dmamap_t dmamap;
   7156 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7157 	bus_addr_t curaddr;
   7158 	bus_size_t seglen, curlen;
   7159 	uint32_t cksumcmd;
   7160 	uint8_t cksumfields;
   7161 
   7162 	KASSERT(mutex_owned(txq->txq_lock));
   7163 
   7164 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7165 		return;
   7166 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7167 		return;
   7168 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7169 		return;
   7170 
   7171 	/* Remember the previous number of free descriptors. */
   7172 	ofree = txq->txq_free;
   7173 
   7174 	/*
   7175 	 * Loop through the send queue, setting up transmit descriptors
   7176 	 * until we drain the queue, or use up all available transmit
   7177 	 * descriptors.
   7178 	 */
   7179 	for (;;) {
   7180 		m0 = NULL;
   7181 
   7182 		/* Get a work queue entry. */
   7183 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7184 			wm_txeof(txq, UINT_MAX);
   7185 			if (txq->txq_sfree == 0) {
   7186 				DPRINTF(WM_DEBUG_TX,
   7187 				    ("%s: TX: no free job descriptors\n",
   7188 					device_xname(sc->sc_dev)));
   7189 				WM_Q_EVCNT_INCR(txq, txsstall);
   7190 				break;
   7191 			}
   7192 		}
   7193 
   7194 		/* Grab a packet off the queue. */
   7195 		if (is_transmit)
   7196 			m0 = pcq_get(txq->txq_interq);
   7197 		else
   7198 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7199 		if (m0 == NULL)
   7200 			break;
   7201 
   7202 		DPRINTF(WM_DEBUG_TX,
   7203 		    ("%s: TX: have packet to transmit: %p\n",
   7204 		    device_xname(sc->sc_dev), m0));
   7205 
   7206 		txs = &txq->txq_soft[txq->txq_snext];
   7207 		dmamap = txs->txs_dmamap;
   7208 
   7209 		use_tso = (m0->m_pkthdr.csum_flags &
   7210 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7211 
   7212 		/*
   7213 		 * So says the Linux driver:
   7214 		 * The controller does a simple calculation to make sure
   7215 		 * there is enough room in the FIFO before initiating the
   7216 		 * DMA for each buffer.  The calc is:
   7217 		 *	4 = ceil(buffer len / MSS)
   7218 		 * To make sure we don't overrun the FIFO, adjust the max
   7219 		 * buffer len if the MSS drops.
   7220 		 */
   7221 		dmamap->dm_maxsegsz =
   7222 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7223 		    ? m0->m_pkthdr.segsz << 2
   7224 		    : WTX_MAX_LEN;
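         		/*
         		 * E.g. (hypothetical MSS): for segsz = 1460, each DMA
         		 * segment is capped at 1460 << 2 = 5840 bytes (assuming
         		 * that is below WTX_MAX_LEN), so no buffer ever spans
         		 * more than four MSS-sized chunks.
         		 */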
   7225 
   7226 		/*
   7227 		 * Load the DMA map.  If this fails, the packet either
   7228 		 * didn't fit in the allotted number of segments, or we
   7229 		 * were short on resources.  For the too-many-segments
   7230 		 * case, we simply report an error and drop the packet,
   7231 		 * since we can't sanely copy a jumbo packet to a single
   7232 		 * buffer.
   7233 		 */
   7234 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7235 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7236 		if (error) {
   7237 			if (error == EFBIG) {
   7238 				WM_Q_EVCNT_INCR(txq, txdrop);
   7239 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7240 				    "DMA segments, dropping...\n",
   7241 				    device_xname(sc->sc_dev));
   7242 				wm_dump_mbuf_chain(sc, m0);
   7243 				m_freem(m0);
   7244 				continue;
   7245 			}
    7246 			/* Short on resources, just stop for now. */
   7247 			DPRINTF(WM_DEBUG_TX,
   7248 			    ("%s: TX: dmamap load failed: %d\n",
   7249 			    device_xname(sc->sc_dev), error));
   7250 			break;
   7251 		}
   7252 
   7253 		segs_needed = dmamap->dm_nsegs;
   7254 		if (use_tso) {
   7255 			/* For sentinel descriptor; see below. */
   7256 			segs_needed++;
   7257 		}
   7258 
   7259 		/*
   7260 		 * Ensure we have enough descriptors free to describe
   7261 		 * the packet.  Note, we always reserve one descriptor
   7262 		 * at the end of the ring due to the semantics of the
   7263 		 * TDT register, plus one more in the event we need
   7264 		 * to load offload context.
   7265 		 */
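         		/*
         		 * E.g., with txq_free = 10 a packet may use at most 8
         		 * descriptors here: one slot is kept empty so that TDT
         		 * never catches up with the head, and one is reserved
         		 * for a possible offload context descriptor.
         		 */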
   7266 		if (segs_needed > txq->txq_free - 2) {
   7267 			/*
   7268 			 * Not enough free descriptors to transmit this
   7269 			 * packet.  We haven't committed anything yet,
   7270 			 * so just unload the DMA map, put the packet
    7271 			 * back on the queue, and punt.  Notify the upper
   7272 			 * layer that there are no more slots left.
   7273 			 */
   7274 			DPRINTF(WM_DEBUG_TX,
   7275 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7276 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7277 			    segs_needed, txq->txq_free - 1));
   7278 			if (!is_transmit)
   7279 				ifp->if_flags |= IFF_OACTIVE;
   7280 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7281 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7282 			WM_Q_EVCNT_INCR(txq, txdstall);
   7283 			break;
   7284 		}
   7285 
   7286 		/*
   7287 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7288 		 * once we know we can transmit the packet, since we
   7289 		 * do some internal FIFO space accounting here.
   7290 		 */
   7291 		if (sc->sc_type == WM_T_82547 &&
   7292 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7293 			DPRINTF(WM_DEBUG_TX,
   7294 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7295 			    device_xname(sc->sc_dev)));
   7296 			if (!is_transmit)
   7297 				ifp->if_flags |= IFF_OACTIVE;
   7298 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7299 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7300 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7301 			break;
   7302 		}
   7303 
   7304 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7305 
   7306 		DPRINTF(WM_DEBUG_TX,
   7307 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7308 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7309 
   7310 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7311 
   7312 		/*
   7313 		 * Store a pointer to the packet so that we can free it
   7314 		 * later.
   7315 		 *
   7316 		 * Initially, we consider the number of descriptors the
   7317 		 * packet uses the number of DMA segments.  This may be
   7318 		 * incremented by 1 if we do checksum offload (a descriptor
   7319 		 * is used to set the checksum context).
   7320 		 */
   7321 		txs->txs_mbuf = m0;
   7322 		txs->txs_firstdesc = txq->txq_next;
   7323 		txs->txs_ndesc = segs_needed;
   7324 
   7325 		/* Set up offload parameters for this packet. */
   7326 		if (m0->m_pkthdr.csum_flags &
   7327 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7328 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7329 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7330 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7331 					  &cksumfields) != 0) {
   7332 				/* Error message already displayed. */
   7333 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7334 				continue;
   7335 			}
   7336 		} else {
   7337 			cksumcmd = 0;
   7338 			cksumfields = 0;
   7339 		}
   7340 
   7341 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7342 
   7343 		/* Sync the DMA map. */
   7344 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7345 		    BUS_DMASYNC_PREWRITE);
   7346 
   7347 		/* Initialize the transmit descriptor. */
   7348 		for (nexttx = txq->txq_next, seg = 0;
   7349 		     seg < dmamap->dm_nsegs; seg++) {
   7350 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7351 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7352 			     seglen != 0;
   7353 			     curaddr += curlen, seglen -= curlen,
   7354 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7355 				curlen = seglen;
   7356 
   7357 				/*
   7358 				 * So says the Linux driver:
   7359 				 * Work around for premature descriptor
   7360 				 * write-backs in TSO mode.  Append a
   7361 				 * 4-byte sentinel descriptor.
   7362 				 */
   7363 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7364 				    curlen > 8)
   7365 					curlen -= 4;
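         				/*
         				 * The 4 bytes trimmed here re-emerge
         				 * as a final 4-byte descriptor on the
         				 * next loop pass: the sentinel counted
         				 * in segs_needed.
         				 */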
   7366 
   7367 				wm_set_dma_addr(
   7368 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7369 				txq->txq_descs[nexttx].wtx_cmdlen
   7370 				    = htole32(cksumcmd | curlen);
   7371 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7372 				    = 0;
   7373 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7374 				    = cksumfields;
   7375 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7376 				lasttx = nexttx;
   7377 
   7378 				DPRINTF(WM_DEBUG_TX,
   7379 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7380 				     "len %#04zx\n",
   7381 				    device_xname(sc->sc_dev), nexttx,
   7382 				    (uint64_t)curaddr, curlen));
   7383 			}
   7384 		}
   7385 
   7386 		KASSERT(lasttx != -1);
   7387 
   7388 		/*
   7389 		 * Set up the command byte on the last descriptor of
   7390 		 * the packet.  If we're in the interrupt delay window,
   7391 		 * delay the interrupt.
   7392 		 */
   7393 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7394 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7395 
   7396 		/*
   7397 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7398 		 * up the descriptor to encapsulate the packet for us.
   7399 		 *
   7400 		 * This is only valid on the last descriptor of the packet.
   7401 		 */
   7402 		if (vlan_has_tag(m0)) {
   7403 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7404 			    htole32(WTX_CMD_VLE);
   7405 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7406 			    = htole16(vlan_get_tag(m0));
   7407 		}
   7408 
   7409 		txs->txs_lastdesc = lasttx;
   7410 
   7411 		DPRINTF(WM_DEBUG_TX,
   7412 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7413 		    device_xname(sc->sc_dev),
   7414 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7415 
   7416 		/* Sync the descriptors we're using. */
   7417 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7418 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7419 
   7420 		/* Give the packet to the chip. */
   7421 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7422 
   7423 		DPRINTF(WM_DEBUG_TX,
   7424 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7425 
   7426 		DPRINTF(WM_DEBUG_TX,
   7427 		    ("%s: TX: finished transmitting packet, job %d\n",
   7428 		    device_xname(sc->sc_dev), txq->txq_snext));
   7429 
   7430 		/* Advance the tx pointer. */
   7431 		txq->txq_free -= txs->txs_ndesc;
   7432 		txq->txq_next = nexttx;
   7433 
   7434 		txq->txq_sfree--;
   7435 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7436 
   7437 		/* Pass the packet to any BPF listeners. */
   7438 		bpf_mtap(ifp, m0);
   7439 	}
   7440 
   7441 	if (m0 != NULL) {
   7442 		if (!is_transmit)
   7443 			ifp->if_flags |= IFF_OACTIVE;
   7444 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7445 		WM_Q_EVCNT_INCR(txq, txdrop);
   7446 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7447 			__func__));
   7448 		m_freem(m0);
   7449 	}
   7450 
   7451 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7452 		/* No more slots; notify upper layer. */
   7453 		if (!is_transmit)
   7454 			ifp->if_flags |= IFF_OACTIVE;
   7455 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7456 	}
   7457 
   7458 	if (txq->txq_free != ofree) {
   7459 		/* Set a watchdog timer in case the chip flakes out. */
   7460 		txq->txq_lastsent = time_uptime;
   7461 		txq->txq_watchdog = true;
   7462 	}
   7463 }
   7464 
   7465 /*
   7466  * wm_nq_tx_offload:
   7467  *
   7468  *	Set up TCP/IP checksumming parameters for the
   7469  *	specified packet, for NEWQUEUE devices
   7470  */
   7471 static int
   7472 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7473     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7474 {
   7475 	struct mbuf *m0 = txs->txs_mbuf;
   7476 	uint32_t vl_len, mssidx, cmdc;
   7477 	struct ether_header *eh;
   7478 	int offset, iphl;
   7479 
   7480 	/*
   7481 	 * XXX It would be nice if the mbuf pkthdr had offset
   7482 	 * fields for the protocol headers.
   7483 	 */
   7484 	*cmdlenp = 0;
   7485 	*fieldsp = 0;
   7486 
   7487 	eh = mtod(m0, struct ether_header *);
   7488 	switch (htons(eh->ether_type)) {
   7489 	case ETHERTYPE_IP:
   7490 	case ETHERTYPE_IPV6:
   7491 		offset = ETHER_HDR_LEN;
   7492 		break;
   7493 
   7494 	case ETHERTYPE_VLAN:
   7495 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7496 		break;
   7497 
   7498 	default:
   7499 		/* Don't support this protocol or encapsulation. */
   7500 		*do_csum = false;
   7501 		return 0;
   7502 	}
   7503 	*do_csum = true;
   7504 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7505 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7506 
   7507 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7508 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7509 
   7510 	if ((m0->m_pkthdr.csum_flags &
   7511 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7512 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7513 	} else {
   7514 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7515 	}
   7516 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7517 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7518 
   7519 	if (vlan_has_tag(m0)) {
   7520 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7521 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7522 		*cmdlenp |= NQTX_CMD_VLE;
   7523 	}
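         	/*
         	 * E.g. (hypothetical untagged IPv4 frame): offset is
         	 * ETHER_HDR_LEN (14) and iphl is 20, so vl_len packs
         	 * (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
         	 * (20 << NQTXC_VLLEN_IPLEN_SHIFT), with the VLAN field zero.
         	 */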
   7524 
   7525 	mssidx = 0;
   7526 
   7527 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7528 		int hlen = offset + iphl;
   7529 		int tcp_hlen;
   7530 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7531 
   7532 		if (__predict_false(m0->m_len <
   7533 				    (hlen + sizeof(struct tcphdr)))) {
   7534 			/*
   7535 			 * TCP/IP headers are not in the first mbuf; we need
   7536 			 * to do this the slow and painful way.  Let's just
   7537 			 * hope this doesn't happen very often.
   7538 			 */
   7539 			struct tcphdr th;
   7540 
   7541 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7542 
   7543 			m_copydata(m0, hlen, sizeof(th), &th);
   7544 			if (v4) {
   7545 				struct ip ip;
   7546 
   7547 				m_copydata(m0, offset, sizeof(ip), &ip);
   7548 				ip.ip_len = 0;
   7549 				m_copyback(m0,
   7550 				    offset + offsetof(struct ip, ip_len),
   7551 				    sizeof(ip.ip_len), &ip.ip_len);
   7552 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7553 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7554 			} else {
   7555 				struct ip6_hdr ip6;
   7556 
   7557 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7558 				ip6.ip6_plen = 0;
   7559 				m_copyback(m0,
   7560 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7561 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7562 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7563 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7564 			}
   7565 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7566 			    sizeof(th.th_sum), &th.th_sum);
   7567 
   7568 			tcp_hlen = th.th_off << 2;
   7569 		} else {
   7570 			/*
   7571 			 * TCP/IP headers are in the first mbuf; we can do
   7572 			 * this the easy way.
   7573 			 */
   7574 			struct tcphdr *th;
   7575 
   7576 			if (v4) {
   7577 				struct ip *ip =
   7578 				    (void *)(mtod(m0, char *) + offset);
   7579 				th = (void *)(mtod(m0, char *) + hlen);
   7580 
   7581 				ip->ip_len = 0;
   7582 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7583 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7584 			} else {
   7585 				struct ip6_hdr *ip6 =
   7586 				    (void *)(mtod(m0, char *) + offset);
   7587 				th = (void *)(mtod(m0, char *) + hlen);
   7588 
   7589 				ip6->ip6_plen = 0;
   7590 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7591 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7592 			}
   7593 			tcp_hlen = th->th_off << 2;
   7594 		}
   7595 		hlen += tcp_hlen;
   7596 		*cmdlenp |= NQTX_CMD_TSE;
   7597 
   7598 		if (v4) {
   7599 			WM_Q_EVCNT_INCR(txq, txtso);
   7600 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7601 		} else {
   7602 			WM_Q_EVCNT_INCR(txq, txtso6);
   7603 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7604 		}
   7605 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7606 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7607 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7608 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7609 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7610 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
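         		/*
         		 * E.g. (hypothetical values): for an MSS of 1460 and a
         		 * 20-byte TCP header, mssidx packs
         		 * (1460 << NQTXC_MSSIDX_MSS_SHIFT) |
         		 * (20 << NQTXC_MSSIDX_L4LEN_SHIFT).
         		 */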
   7611 	} else {
   7612 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7613 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7614 	}
   7615 
   7616 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7617 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7618 		cmdc |= NQTXC_CMD_IP4;
   7619 	}
   7620 
   7621 	if (m0->m_pkthdr.csum_flags &
   7622 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7623 		WM_Q_EVCNT_INCR(txq, txtusum);
   7624 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7625 			cmdc |= NQTXC_CMD_TCP;
   7626 		} else {
   7627 			cmdc |= NQTXC_CMD_UDP;
   7628 		}
   7629 		cmdc |= NQTXC_CMD_IP4;
   7630 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7631 	}
   7632 	if (m0->m_pkthdr.csum_flags &
   7633 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7634 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7635 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7636 			cmdc |= NQTXC_CMD_TCP;
   7637 		} else {
   7638 			cmdc |= NQTXC_CMD_UDP;
   7639 		}
   7640 		cmdc |= NQTXC_CMD_IP6;
   7641 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7642 	}
   7643 
   7644 	/*
    7645 	 * We don't have to write a context descriptor for every packet on
    7646 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7647 	 * I354, I210 and I211.  For these controllers it is enough to
    7648 	 * write a context descriptor once per Tx queue.
    7649 	 * Writing a context descriptor for every packet adds overhead,
    7650 	 * but it does not cause problems.
   7651 	 */
   7652 	/* Fill in the context descriptor. */
   7653 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7654 	    htole32(vl_len);
   7655 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7656 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7657 	    htole32(cmdc);
   7658 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7659 	    htole32(mssidx);
   7660 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7661 	DPRINTF(WM_DEBUG_TX,
   7662 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7663 	    txq->txq_next, 0, vl_len));
   7664 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7665 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7666 	txs->txs_ndesc++;
   7667 	return 0;
   7668 }
   7669 
   7670 /*
   7671  * wm_nq_start:		[ifnet interface function]
   7672  *
   7673  *	Start packet transmission on the interface for NEWQUEUE devices
   7674  */
   7675 static void
   7676 wm_nq_start(struct ifnet *ifp)
   7677 {
   7678 	struct wm_softc *sc = ifp->if_softc;
   7679 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7680 
   7681 #ifdef WM_MPSAFE
   7682 	KASSERT(if_is_mpsafe(ifp));
   7683 #endif
   7684 	/*
   7685 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7686 	 */
   7687 
   7688 	mutex_enter(txq->txq_lock);
   7689 	if (!txq->txq_stopping)
   7690 		wm_nq_start_locked(ifp);
   7691 	mutex_exit(txq->txq_lock);
   7692 }
   7693 
   7694 static void
   7695 wm_nq_start_locked(struct ifnet *ifp)
   7696 {
   7697 	struct wm_softc *sc = ifp->if_softc;
   7698 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7699 
   7700 	wm_nq_send_common_locked(ifp, txq, false);
   7701 }
   7702 
   7703 static int
   7704 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7705 {
   7706 	int qid;
   7707 	struct wm_softc *sc = ifp->if_softc;
   7708 	struct wm_txqueue *txq;
   7709 
   7710 	qid = wm_select_txqueue(ifp, m);
   7711 	txq = &sc->sc_queue[qid].wmq_txq;
   7712 
   7713 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7714 		m_freem(m);
   7715 		WM_Q_EVCNT_INCR(txq, txdrop);
   7716 		return ENOBUFS;
   7717 	}
   7718 
   7719 	/*
   7720 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7721 	 */
   7722 	ifp->if_obytes += m->m_pkthdr.len;
   7723 	if (m->m_flags & M_MCAST)
   7724 		ifp->if_omcasts++;
   7725 
   7726 	/*
    7727 	 * This mutex_tryenter() can fail at run time in two situations:
    7728 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7729 	 *     (2) contention with the deferred if_start softint
    7730 	 *         (wm_handle_queue())
    7731 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7732 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7733 	 * The same holds in case (2): the last enqueued packet is likewise
    7734 	 * dequeued by wm_deferred_start_locked().
   7735 	 */
   7736 	if (mutex_tryenter(txq->txq_lock)) {
   7737 		if (!txq->txq_stopping)
   7738 			wm_nq_transmit_locked(ifp, txq);
   7739 		mutex_exit(txq->txq_lock);
   7740 	}
   7741 
   7742 	return 0;
   7743 }
   7744 
   7745 static void
   7746 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7747 {
   7748 
   7749 	wm_nq_send_common_locked(ifp, txq, true);
   7750 }
   7751 
   7752 static void
   7753 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7754     bool is_transmit)
   7755 {
   7756 	struct wm_softc *sc = ifp->if_softc;
   7757 	struct mbuf *m0;
   7758 	struct wm_txsoft *txs;
   7759 	bus_dmamap_t dmamap;
   7760 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7761 	bool do_csum, sent;
   7762 
   7763 	KASSERT(mutex_owned(txq->txq_lock));
   7764 
   7765 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7766 		return;
   7767 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7768 		return;
   7769 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7770 		return;
   7771 
   7772 	sent = false;
   7773 
   7774 	/*
   7775 	 * Loop through the send queue, setting up transmit descriptors
   7776 	 * until we drain the queue, or use up all available transmit
   7777 	 * descriptors.
   7778 	 */
   7779 	for (;;) {
   7780 		m0 = NULL;
   7781 
   7782 		/* Get a work queue entry. */
   7783 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7784 			wm_txeof(txq, UINT_MAX);
   7785 			if (txq->txq_sfree == 0) {
   7786 				DPRINTF(WM_DEBUG_TX,
   7787 				    ("%s: TX: no free job descriptors\n",
   7788 					device_xname(sc->sc_dev)));
   7789 				WM_Q_EVCNT_INCR(txq, txsstall);
   7790 				break;
   7791 			}
   7792 		}
   7793 
   7794 		/* Grab a packet off the queue. */
   7795 		if (is_transmit)
   7796 			m0 = pcq_get(txq->txq_interq);
   7797 		else
   7798 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7799 		if (m0 == NULL)
   7800 			break;
   7801 
   7802 		DPRINTF(WM_DEBUG_TX,
   7803 		    ("%s: TX: have packet to transmit: %p\n",
   7804 		    device_xname(sc->sc_dev), m0));
   7805 
   7806 		txs = &txq->txq_soft[txq->txq_snext];
   7807 		dmamap = txs->txs_dmamap;
   7808 
   7809 		/*
   7810 		 * Load the DMA map.  If this fails, the packet either
   7811 		 * didn't fit in the allotted number of segments, or we
   7812 		 * were short on resources.  For the too-many-segments
   7813 		 * case, we simply report an error and drop the packet,
   7814 		 * since we can't sanely copy a jumbo packet to a single
   7815 		 * buffer.
   7816 		 */
   7817 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7818 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7819 		if (error) {
   7820 			if (error == EFBIG) {
   7821 				WM_Q_EVCNT_INCR(txq, txdrop);
   7822 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7823 				    "DMA segments, dropping...\n",
   7824 				    device_xname(sc->sc_dev));
   7825 				wm_dump_mbuf_chain(sc, m0);
   7826 				m_freem(m0);
   7827 				continue;
   7828 			}
   7829 			/* Short on resources, just stop for now. */
   7830 			DPRINTF(WM_DEBUG_TX,
   7831 			    ("%s: TX: dmamap load failed: %d\n",
   7832 			    device_xname(sc->sc_dev), error));
   7833 			break;
   7834 		}
   7835 
   7836 		segs_needed = dmamap->dm_nsegs;
   7837 
   7838 		/*
   7839 		 * Ensure we have enough descriptors free to describe
   7840 		 * the packet.  Note, we always reserve one descriptor
   7841 		 * at the end of the ring due to the semantics of the
   7842 		 * TDT register, plus one more in the event we need
   7843 		 * to load offload context.
   7844 		 */
   7845 		if (segs_needed > txq->txq_free - 2) {
   7846 			/*
   7847 			 * Not enough free descriptors to transmit this
   7848 			 * packet.  We haven't committed anything yet,
   7849 			 * so just unload the DMA map, put the packet
    7850 			 * back on the queue, and punt.  Notify the upper
   7851 			 * layer that there are no more slots left.
   7852 			 */
   7853 			DPRINTF(WM_DEBUG_TX,
   7854 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7855 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7856 			    segs_needed, txq->txq_free - 1));
   7857 			if (!is_transmit)
   7858 				ifp->if_flags |= IFF_OACTIVE;
   7859 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7860 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7861 			WM_Q_EVCNT_INCR(txq, txdstall);
   7862 			break;
   7863 		}
   7864 
   7865 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7866 
   7867 		DPRINTF(WM_DEBUG_TX,
   7868 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7869 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7870 
   7871 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7872 
   7873 		/*
   7874 		 * Store a pointer to the packet so that we can free it
   7875 		 * later.
   7876 		 *
   7877 		 * Initially, we consider the number of descriptors the
    7878 		 * packet uses to be the number of DMA segments.  This may be
   7879 		 * incremented by 1 if we do checksum offload (a descriptor
   7880 		 * is used to set the checksum context).
   7881 		 */
   7882 		txs->txs_mbuf = m0;
   7883 		txs->txs_firstdesc = txq->txq_next;
   7884 		txs->txs_ndesc = segs_needed;
   7885 
   7886 		/* Set up offload parameters for this packet. */
   7887 		uint32_t cmdlen, fields, dcmdlen;
   7888 		if (m0->m_pkthdr.csum_flags &
   7889 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7890 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7891 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7892 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7893 			    &do_csum) != 0) {
   7894 				/* Error message already displayed. */
   7895 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7896 				continue;
   7897 			}
   7898 		} else {
   7899 			do_csum = false;
   7900 			cmdlen = 0;
   7901 			fields = 0;
   7902 		}
   7903 
   7904 		/* Sync the DMA map. */
   7905 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7906 		    BUS_DMASYNC_PREWRITE);
   7907 
   7908 		/* Initialize the first transmit descriptor. */
   7909 		nexttx = txq->txq_next;
   7910 		if (!do_csum) {
   7911 			/* setup a legacy descriptor */
   7912 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7913 			    dmamap->dm_segs[0].ds_addr);
   7914 			txq->txq_descs[nexttx].wtx_cmdlen =
   7915 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7916 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7917 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7918 			if (vlan_has_tag(m0)) {
   7919 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7920 				    htole32(WTX_CMD_VLE);
   7921 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7922 				    htole16(vlan_get_tag(m0));
   7923 			} else {
    7924 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7925 			}
   7926 			dcmdlen = 0;
   7927 		} else {
   7928 			/* setup an advanced data descriptor */
   7929 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7930 			    htole64(dmamap->dm_segs[0].ds_addr);
   7931 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7932 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7933 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7934 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7935 			    htole32(fields);
   7936 			DPRINTF(WM_DEBUG_TX,
   7937 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7938 			    device_xname(sc->sc_dev), nexttx,
   7939 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7940 			DPRINTF(WM_DEBUG_TX,
   7941 			    ("\t 0x%08x%08x\n", fields,
   7942 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7943 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7944 		}
   7945 
   7946 		lasttx = nexttx;
   7947 		nexttx = WM_NEXTTX(txq, nexttx);
   7948 		/*
    7949 		 * Fill in the next descriptors.  The legacy and advanced
    7950 		 * formats are the same here.
   7951 		 */
   7952 		for (seg = 1; seg < dmamap->dm_nsegs;
   7953 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7954 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7955 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7956 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7957 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7958 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7959 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7960 			lasttx = nexttx;
   7961 
   7962 			DPRINTF(WM_DEBUG_TX,
   7963 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7964 			     "len %#04zx\n",
   7965 			    device_xname(sc->sc_dev), nexttx,
   7966 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7967 			    dmamap->dm_segs[seg].ds_len));
   7968 		}
   7969 
   7970 		KASSERT(lasttx != -1);
   7971 
   7972 		/*
   7973 		 * Set up the command byte on the last descriptor of
   7974 		 * the packet.  If we're in the interrupt delay window,
   7975 		 * delay the interrupt.
   7976 		 */
   7977 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7978 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7979 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7980 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7981 
   7982 		txs->txs_lastdesc = lasttx;
   7983 
   7984 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7985 		    device_xname(sc->sc_dev),
   7986 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7987 
   7988 		/* Sync the descriptors we're using. */
   7989 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7990 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7991 
   7992 		/* Give the packet to the chip. */
   7993 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7994 		sent = true;
   7995 
   7996 		DPRINTF(WM_DEBUG_TX,
   7997 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7998 
   7999 		DPRINTF(WM_DEBUG_TX,
   8000 		    ("%s: TX: finished transmitting packet, job %d\n",
   8001 		    device_xname(sc->sc_dev), txq->txq_snext));
   8002 
   8003 		/* Advance the tx pointer. */
   8004 		txq->txq_free -= txs->txs_ndesc;
   8005 		txq->txq_next = nexttx;
   8006 
   8007 		txq->txq_sfree--;
   8008 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8009 
   8010 		/* Pass the packet to any BPF listeners. */
   8011 		bpf_mtap(ifp, m0);
   8012 	}
   8013 
   8014 	if (m0 != NULL) {
   8015 		if (!is_transmit)
   8016 			ifp->if_flags |= IFF_OACTIVE;
   8017 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8018 		WM_Q_EVCNT_INCR(txq, txdrop);
   8019 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8020 			__func__));
   8021 		m_freem(m0);
   8022 	}
   8023 
   8024 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8025 		/* No more slots; notify upper layer. */
   8026 		if (!is_transmit)
   8027 			ifp->if_flags |= IFF_OACTIVE;
   8028 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8029 	}
   8030 
   8031 	if (sent) {
   8032 		/* Set a watchdog timer in case the chip flakes out. */
   8033 		txq->txq_lastsent = time_uptime;
   8034 		txq->txq_watchdog = true;
   8035 	}
   8036 }
   8037 
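         /*
          * A minimal sketch (not driver code) of the ring accounting used in
          * the send loop above.  wm_tx_has_room() is a hypothetical helper:
          * of the txq_free descriptors, one is kept unused due to the TDT
          * register semantics (a completely full ring would be
          * indistinguishable from an empty one) and one more is reserved for
          * a possible offload context descriptor, hence the "- 2".
          */
         #if 0
         static bool
         wm_tx_has_room(const struct wm_txqueue *txq, int nsegs)
         {
         
         	/* One slot for TDT semantics, one for a context descriptor. */
         	return (nsegs <= txq->txq_free - 2);
         }
         #endif
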
   8038 static void
   8039 wm_deferred_start_locked(struct wm_txqueue *txq)
   8040 {
   8041 	struct wm_softc *sc = txq->txq_sc;
   8042 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8043 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8044 	int qid = wmq->wmq_id;
   8045 
   8046 	KASSERT(mutex_owned(txq->txq_lock));
   8047 
   8048 	if (txq->txq_stopping) {
   8049 		mutex_exit(txq->txq_lock);
   8050 		return;
   8051 	}
   8052 
   8053 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8054 		/* XXX needed for ALTQ or single-CPU systems */
   8055 		if (qid == 0)
   8056 			wm_nq_start_locked(ifp);
   8057 		wm_nq_transmit_locked(ifp, txq);
   8058 	} else {
    8059 		/* XXX needed for ALTQ or single-CPU systems */
   8060 		if (qid == 0)
   8061 			wm_start_locked(ifp);
   8062 		wm_transmit_locked(ifp, txq);
   8063 	}
   8064 }
   8065 
   8066 /* Interrupt */
   8067 
   8068 /*
   8069  * wm_txeof:
   8070  *
   8071  *	Helper; handle transmit interrupts.
   8072  */
   8073 static bool
   8074 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8075 {
   8076 	struct wm_softc *sc = txq->txq_sc;
   8077 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8078 	struct wm_txsoft *txs;
   8079 	int count = 0;
   8080 	int i;
   8081 	uint8_t status;
   8082 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8083 	bool more = false;
   8084 
   8085 	KASSERT(mutex_owned(txq->txq_lock));
   8086 
   8087 	if (txq->txq_stopping)
   8088 		return false;
   8089 
   8090 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8091 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8092 	if (wmq->wmq_id == 0)
   8093 		ifp->if_flags &= ~IFF_OACTIVE;
   8094 
   8095 	/*
   8096 	 * Go through the Tx list and free mbufs for those
   8097 	 * frames which have been transmitted.
   8098 	 */
   8099 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8100 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8101 		if (limit-- == 0) {
   8102 			more = true;
   8103 			DPRINTF(WM_DEBUG_TX,
   8104 			    ("%s: TX: loop limited, job %d is not processed\n",
   8105 				device_xname(sc->sc_dev), i));
   8106 			break;
   8107 		}
   8108 
   8109 		txs = &txq->txq_soft[i];
   8110 
   8111 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8112 			device_xname(sc->sc_dev), i));
   8113 
   8114 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8115 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8116 
   8117 		status =
   8118 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8119 		if ((status & WTX_ST_DD) == 0) {
   8120 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8121 			    BUS_DMASYNC_PREREAD);
   8122 			break;
   8123 		}
   8124 
   8125 		count++;
   8126 		DPRINTF(WM_DEBUG_TX,
   8127 		    ("%s: TX: job %d done: descs %d..%d\n",
   8128 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8129 		    txs->txs_lastdesc));
   8130 
   8131 		/*
   8132 		 * XXX We should probably be using the statistics
   8133 		 * XXX registers, but I don't know if they exist
   8134 		 * XXX on chips before the i82544.
   8135 		 */
   8136 
   8137 #ifdef WM_EVENT_COUNTERS
   8138 		if (status & WTX_ST_TU)
   8139 			WM_Q_EVCNT_INCR(txq, tu);
   8140 #endif /* WM_EVENT_COUNTERS */
   8141 
   8142 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8143 			ifp->if_oerrors++;
   8144 			if (status & WTX_ST_LC)
   8145 				log(LOG_WARNING, "%s: late collision\n",
   8146 				    device_xname(sc->sc_dev));
   8147 			else if (status & WTX_ST_EC) {
   8148 				ifp->if_collisions += 16;
   8149 				log(LOG_WARNING, "%s: excessive collisions\n",
   8150 				    device_xname(sc->sc_dev));
   8151 			}
   8152 		} else
   8153 			ifp->if_opackets++;
   8154 
   8155 		txq->txq_packets++;
   8156 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8157 
   8158 		txq->txq_free += txs->txs_ndesc;
   8159 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8160 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8161 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8162 		m_freem(txs->txs_mbuf);
   8163 		txs->txs_mbuf = NULL;
   8164 	}
   8165 
   8166 	/* Update the dirty transmit buffer pointer. */
   8167 	txq->txq_sdirty = i;
   8168 	DPRINTF(WM_DEBUG_TX,
   8169 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8170 
   8171 	if (count != 0)
   8172 		rnd_add_uint32(&sc->rnd_source, count);
   8173 
   8174 	/*
   8175 	 * If there are no more pending transmissions, cancel the watchdog
   8176 	 * timer.
   8177 	 */
   8178 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8179 		txq->txq_watchdog = false;
   8180 
   8181 	return more;
   8182 }
   8183 
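         /*
          * A minimal sketch (not driver code) of the completion test used in
          * wm_txeof() above.  wm_tx_jobs_done() is a hypothetical helper that
          * only counts reclaimable jobs: a job is done once the chip has
          * written the Descriptor Done (DD) bit back into its last descriptor.
          */
         #if 0
         static int
         wm_tx_jobs_done(struct wm_txqueue *txq)
         {
         	struct wm_txsoft *txs;
         	uint8_t status;
         	int i, sfree, n = 0;
         
         	for (i = txq->txq_sdirty, sfree = txq->txq_sfree;
         	     sfree != WM_TXQUEUELEN(txq);
         	     i = WM_NEXTTXS(txq, i), sfree++) {
         		txs = &txq->txq_soft[i];
         		status = txq->txq_descs[txs->txs_lastdesc]
         		    .wtx_fields.wtxu_status;
         		if ((status & WTX_ST_DD) == 0)
         			break;
         		n++;
         	}
         	return n;
         }
         #endif
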
   8184 static inline uint32_t
   8185 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8186 {
   8187 	struct wm_softc *sc = rxq->rxq_sc;
   8188 
   8189 	if (sc->sc_type == WM_T_82574)
   8190 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8191 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8192 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8193 	else
   8194 		return rxq->rxq_descs[idx].wrx_status;
   8195 }
   8196 
   8197 static inline uint32_t
   8198 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8199 {
   8200 	struct wm_softc *sc = rxq->rxq_sc;
   8201 
   8202 	if (sc->sc_type == WM_T_82574)
   8203 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8204 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8205 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8206 	else
   8207 		return rxq->rxq_descs[idx].wrx_errors;
   8208 }
   8209 
   8210 static inline uint16_t
   8211 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8212 {
   8213 	struct wm_softc *sc = rxq->rxq_sc;
   8214 
   8215 	if (sc->sc_type == WM_T_82574)
   8216 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8217 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8218 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8219 	else
   8220 		return rxq->rxq_descs[idx].wrx_special;
   8221 }
   8222 
   8223 static inline int
   8224 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8225 {
   8226 	struct wm_softc *sc = rxq->rxq_sc;
   8227 
   8228 	if (sc->sc_type == WM_T_82574)
   8229 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8230 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8231 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8232 	else
   8233 		return rxq->rxq_descs[idx].wrx_len;
   8234 }
   8235 
   8236 #ifdef WM_DEBUG
   8237 static inline uint32_t
   8238 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8239 {
   8240 	struct wm_softc *sc = rxq->rxq_sc;
   8241 
   8242 	if (sc->sc_type == WM_T_82574)
   8243 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8244 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8245 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8246 	else
   8247 		return 0;
   8248 }
   8249 
   8250 static inline uint8_t
   8251 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8252 {
   8253 	struct wm_softc *sc = rxq->rxq_sc;
   8254 
   8255 	if (sc->sc_type == WM_T_82574)
   8256 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8257 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8258 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8259 	else
   8260 		return 0;
   8261 }
   8262 #endif /* WM_DEBUG */
   8263 
   8264 static inline bool
   8265 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8266     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8267 {
   8268 
   8269 	if (sc->sc_type == WM_T_82574)
   8270 		return (status & ext_bit) != 0;
   8271 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8272 		return (status & nq_bit) != 0;
   8273 	else
   8274 		return (status & legacy_bit) != 0;
   8275 }
   8276 
   8277 static inline bool
   8278 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8279     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8280 {
   8281 
   8282 	if (sc->sc_type == WM_T_82574)
   8283 		return (error & ext_bit) != 0;
   8284 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8285 		return (error & nq_bit) != 0;
   8286 	else
   8287 		return (error & legacy_bit) != 0;
   8288 }
   8289 
   8290 static inline bool
   8291 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8292 {
   8293 
   8294 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8295 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8296 		return true;
   8297 	else
   8298 		return false;
   8299 }
   8300 
   8301 static inline bool
   8302 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8303 {
   8304 	struct wm_softc *sc = rxq->rxq_sc;
   8305 
    8306 	/* XXX missing error bit for newqueue? */
   8307 	if (wm_rxdesc_is_set_error(sc, errors,
   8308 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8309 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8310 		NQRXC_ERROR_RXE)) {
   8311 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8312 			log(LOG_WARNING, "%s: symbol error\n",
   8313 			    device_xname(sc->sc_dev));
   8314 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8315 			log(LOG_WARNING, "%s: receive sequence error\n",
   8316 			    device_xname(sc->sc_dev));
   8317 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8318 			log(LOG_WARNING, "%s: CRC error\n",
   8319 			    device_xname(sc->sc_dev));
   8320 		return true;
   8321 	}
   8322 
   8323 	return false;
   8324 }
   8325 
   8326 static inline bool
   8327 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8328 {
   8329 	struct wm_softc *sc = rxq->rxq_sc;
   8330 
   8331 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8332 		NQRXC_STATUS_DD)) {
   8333 		/* We have processed all of the receive descriptors. */
   8334 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8335 		return false;
   8336 	}
   8337 
   8338 	return true;
   8339 }
   8340 
   8341 static inline bool
   8342 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8343     struct mbuf *m)
   8344 {
   8345 
   8346 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8347 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8348 		vlan_set_tag(m, le16toh(vlantag));
   8349 	}
   8350 
   8351 	return true;
   8352 }
   8353 
   8354 static inline void
   8355 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8356     uint32_t errors, struct mbuf *m)
   8357 {
   8358 	struct wm_softc *sc = rxq->rxq_sc;
   8359 
   8360 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8361 		if (wm_rxdesc_is_set_status(sc, status,
   8362 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8363 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8364 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8365 			if (wm_rxdesc_is_set_error(sc, errors,
   8366 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8367 				m->m_pkthdr.csum_flags |=
   8368 					M_CSUM_IPv4_BAD;
   8369 		}
   8370 		if (wm_rxdesc_is_set_status(sc, status,
   8371 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8372 			/*
   8373 			 * Note: we don't know if this was TCP or UDP,
   8374 			 * so we just set both bits, and expect the
   8375 			 * upper layers to deal.
   8376 			 */
   8377 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8378 			m->m_pkthdr.csum_flags |=
   8379 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8380 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8381 			if (wm_rxdesc_is_set_error(sc, errors,
   8382 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8383 				m->m_pkthdr.csum_flags |=
   8384 					M_CSUM_TCP_UDP_BAD;
   8385 		}
   8386 	}
   8387 }
   8388 
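         /*
          * A minimal sketch (not driver code) of how a consumer would read the
          * csum_flags set above: the hardware only reports that an L4 checksum
          * was checked, so the TCP and UDP bits are set together and
          * M_CSUM_TCP_UDP_BAD marks a failed check.
          */
         #if 0
         	int flags = m->m_pkthdr.csum_flags;
         
         	if (flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4 |
         	    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
         		if (flags & M_CSUM_TCP_UDP_BAD)
         			;	/* hardware saw a bad L4 checksum */
         		else
         			;	/* verified in hardware */
         	}
         #endif
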
   8389 /*
   8390  * wm_rxeof:
   8391  *
   8392  *	Helper; handle receive interrupts.
   8393  */
   8394 static bool
   8395 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8396 {
   8397 	struct wm_softc *sc = rxq->rxq_sc;
   8398 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8399 	struct wm_rxsoft *rxs;
   8400 	struct mbuf *m;
   8401 	int i, len;
   8402 	int count = 0;
   8403 	uint32_t status, errors;
   8404 	uint16_t vlantag;
   8405 	bool more = false;
   8406 
   8407 	KASSERT(mutex_owned(rxq->rxq_lock));
   8408 
   8409 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8410 		if (limit-- == 0) {
   8411 			rxq->rxq_ptr = i;
   8412 			more = true;
   8413 			DPRINTF(WM_DEBUG_RX,
   8414 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8415 				device_xname(sc->sc_dev), i));
   8416 			break;
   8417 		}
   8418 
   8419 		rxs = &rxq->rxq_soft[i];
   8420 
   8421 		DPRINTF(WM_DEBUG_RX,
   8422 		    ("%s: RX: checking descriptor %d\n",
   8423 		    device_xname(sc->sc_dev), i));
    8424 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8425 
   8426 		status = wm_rxdesc_get_status(rxq, i);
   8427 		errors = wm_rxdesc_get_errors(rxq, i);
   8428 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8429 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8430 #ifdef WM_DEBUG
   8431 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8432 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8433 #endif
   8434 
   8435 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8436 			/*
    8437 			 * Update the receive pointer while holding rxq_lock,
    8438 			 * consistent with the counter updates.
   8439 			 */
   8440 			rxq->rxq_ptr = i;
   8441 			break;
   8442 		}
   8443 
   8444 		count++;
   8445 		if (__predict_false(rxq->rxq_discard)) {
   8446 			DPRINTF(WM_DEBUG_RX,
   8447 			    ("%s: RX: discarding contents of descriptor %d\n",
   8448 			    device_xname(sc->sc_dev), i));
   8449 			wm_init_rxdesc(rxq, i);
   8450 			if (wm_rxdesc_is_eop(rxq, status)) {
   8451 				/* Reset our state. */
   8452 				DPRINTF(WM_DEBUG_RX,
   8453 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8454 				    device_xname(sc->sc_dev)));
   8455 				rxq->rxq_discard = 0;
   8456 			}
   8457 			continue;
   8458 		}
   8459 
   8460 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8461 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8462 
   8463 		m = rxs->rxs_mbuf;
   8464 
   8465 		/*
   8466 		 * Add a new receive buffer to the ring, unless of
   8467 		 * course the length is zero. Treat the latter as a
   8468 		 * failed mapping.
   8469 		 */
   8470 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8471 			/*
   8472 			 * Failed, throw away what we've done so
   8473 			 * far, and discard the rest of the packet.
   8474 			 */
   8475 			ifp->if_ierrors++;
   8476 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8477 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8478 			wm_init_rxdesc(rxq, i);
   8479 			if (!wm_rxdesc_is_eop(rxq, status))
   8480 				rxq->rxq_discard = 1;
   8481 			if (rxq->rxq_head != NULL)
   8482 				m_freem(rxq->rxq_head);
   8483 			WM_RXCHAIN_RESET(rxq);
   8484 			DPRINTF(WM_DEBUG_RX,
   8485 			    ("%s: RX: Rx buffer allocation failed, "
   8486 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8487 			    rxq->rxq_discard ? " (discard)" : ""));
   8488 			continue;
   8489 		}
   8490 
   8491 		m->m_len = len;
   8492 		rxq->rxq_len += len;
   8493 		DPRINTF(WM_DEBUG_RX,
   8494 		    ("%s: RX: buffer at %p len %d\n",
   8495 		    device_xname(sc->sc_dev), m->m_data, len));
   8496 
   8497 		/* If this is not the end of the packet, keep looking. */
   8498 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8499 			WM_RXCHAIN_LINK(rxq, m);
   8500 			DPRINTF(WM_DEBUG_RX,
   8501 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8502 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8503 			continue;
   8504 		}
   8505 
   8506 		/*
    8507 		 * Okay, we have the entire packet now.  The chip is
    8508 		 * configured to include the FCS except on the I350/I354
    8509 		 * and I21[01] (not all chips can be configured to strip
    8510 		 * it), so we need to trim it.  We may need to adjust the
    8511 		 * length of the previous mbuf in the chain if the current
    8512 		 * mbuf is too short.
    8513 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8514 		 * register is always set on the I350, so we don't trim.
   8515 		 */
   8516 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8517 		    && (sc->sc_type != WM_T_I210)
   8518 		    && (sc->sc_type != WM_T_I211)) {
   8519 			if (m->m_len < ETHER_CRC_LEN) {
   8520 				rxq->rxq_tail->m_len
   8521 				    -= (ETHER_CRC_LEN - m->m_len);
   8522 				m->m_len = 0;
   8523 			} else
   8524 				m->m_len -= ETHER_CRC_LEN;
   8525 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8526 		} else
   8527 			len = rxq->rxq_len;
   8528 
   8529 		WM_RXCHAIN_LINK(rxq, m);
   8530 
   8531 		*rxq->rxq_tailp = NULL;
   8532 		m = rxq->rxq_head;
   8533 
   8534 		WM_RXCHAIN_RESET(rxq);
   8535 
   8536 		DPRINTF(WM_DEBUG_RX,
   8537 		    ("%s: RX: have entire packet, len -> %d\n",
   8538 		    device_xname(sc->sc_dev), len));
   8539 
   8540 		/* If an error occurred, update stats and drop the packet. */
   8541 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8542 			m_freem(m);
   8543 			continue;
   8544 		}
   8545 
   8546 		/* No errors.  Receive the packet. */
   8547 		m_set_rcvif(m, ifp);
   8548 		m->m_pkthdr.len = len;
   8549 		/*
    8550 		 * TODO:
    8551 		 * We should save the rsshash and rsstype in this mbuf.
   8552 		 */
   8553 		DPRINTF(WM_DEBUG_RX,
   8554 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8555 			device_xname(sc->sc_dev), rsstype, rsshash));
   8556 
   8557 		/*
   8558 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8559 		 * for us.  Associate the tag with the packet.
   8560 		 */
   8561 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8562 			continue;
   8563 
   8564 		/* Set up checksum info for this packet. */
   8565 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8566 		/*
    8567 		 * Update the receive pointer while holding rxq_lock,
    8568 		 * consistent with the counter updates below.
   8569 		 */
   8570 		rxq->rxq_ptr = i;
   8571 		rxq->rxq_packets++;
   8572 		rxq->rxq_bytes += len;
   8573 		mutex_exit(rxq->rxq_lock);
   8574 
   8575 		/* Pass it on. */
   8576 		if_percpuq_enqueue(sc->sc_ipq, m);
   8577 
   8578 		mutex_enter(rxq->rxq_lock);
   8579 
   8580 		if (rxq->rxq_stopping)
   8581 			break;
   8582 	}
   8583 
   8584 	if (count != 0)
   8585 		rnd_add_uint32(&sc->rnd_source, count);
   8586 
   8587 	DPRINTF(WM_DEBUG_RX,
   8588 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8589 
   8590 	return more;
   8591 }
   8592 
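         /*
          * Worked example (not driver code) of the FCS trim in wm_rxeof()
          * above, with ETHER_CRC_LEN == 4: if a chain ends with mbuf lengths
          * {..., 1514, 1}, only one of the four CRC bytes sits in the last
          * mbuf, so that mbuf is emptied and the remaining three bytes are
          * trimmed from its predecessor, giving {..., 1511, 0}; the packet
          * length becomes rxq_len - 4.
          */
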
   8593 /*
   8594  * wm_linkintr_gmii:
   8595  *
   8596  *	Helper; handle link interrupts for GMII.
   8597  */
   8598 static void
   8599 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8600 {
   8601 
   8602 	KASSERT(WM_CORE_LOCKED(sc));
   8603 
   8604 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8605 		__func__));
   8606 
   8607 	if (icr & ICR_LSC) {
   8608 		uint32_t reg;
   8609 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8610 
   8611 		if ((status & STATUS_LU) != 0) {
   8612 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8613 				device_xname(sc->sc_dev),
   8614 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8615 		} else {
   8616 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8617 				device_xname(sc->sc_dev)));
   8618 		}
   8619 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8620 			wm_gig_downshift_workaround_ich8lan(sc);
   8621 
   8622 		if ((sc->sc_type == WM_T_ICH8)
   8623 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8624 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8625 		}
   8626 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8627 			device_xname(sc->sc_dev)));
   8628 		mii_pollstat(&sc->sc_mii);
   8629 		if (sc->sc_type == WM_T_82543) {
   8630 			int miistatus, active;
   8631 
   8632 			/*
   8633 			 * With 82543, we need to force speed and
   8634 			 * duplex on the MAC equal to what the PHY
   8635 			 * speed and duplex configuration is.
   8636 			 */
   8637 			miistatus = sc->sc_mii.mii_media_status;
   8638 
   8639 			if (miistatus & IFM_ACTIVE) {
   8640 				active = sc->sc_mii.mii_media_active;
   8641 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8642 				switch (IFM_SUBTYPE(active)) {
   8643 				case IFM_10_T:
   8644 					sc->sc_ctrl |= CTRL_SPEED_10;
   8645 					break;
   8646 				case IFM_100_TX:
   8647 					sc->sc_ctrl |= CTRL_SPEED_100;
   8648 					break;
   8649 				case IFM_1000_T:
   8650 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8651 					break;
   8652 				default:
   8653 					/*
   8654 					 * fiber?
   8655 					 * Shoud not enter here.
    8656 					 * Should not enter here.
   8657 					printf("unknown media (%x)\n", active);
   8658 					break;
   8659 				}
   8660 				if (active & IFM_FDX)
   8661 					sc->sc_ctrl |= CTRL_FD;
   8662 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8663 			}
   8664 		} else if (sc->sc_type == WM_T_PCH) {
   8665 			wm_k1_gig_workaround_hv(sc,
   8666 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8667 		}
   8668 
   8669 		if ((sc->sc_phytype == WMPHY_82578)
   8670 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8671 			== IFM_1000_T)) {
   8672 
   8673 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8674 				delay(200*1000); /* XXX too big */
   8675 
   8676 				/* Link stall fix for link up */
   8677 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8678 				    HV_MUX_DATA_CTRL,
   8679 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8680 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8681 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8682 				    HV_MUX_DATA_CTRL,
   8683 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8684 			}
   8685 		}
   8686 		/*
   8687 		 * I217 Packet Loss issue:
   8688 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8689 		 * on power up.
   8690 		 * Set the Beacon Duration for I217 to 8 usec
   8691 		 */
   8692 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8693 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8694 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8695 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8696 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8697 		}
   8698 
   8699 		/* XXX Work-around I218 hang issue */
   8700 		/* e1000_k1_workaround_lpt_lp() */
   8701 
   8702 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8703 			/*
   8704 			 * Set platform power management values for Latency
   8705 			 * Tolerance Reporting (LTR)
   8706 			 */
   8707 			wm_platform_pm_pch_lpt(sc,
   8708 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8709 				    != 0));
   8710 		}
   8711 
   8712 		/* FEXTNVM6 K1-off workaround */
   8713 		if (sc->sc_type == WM_T_PCH_SPT) {
   8714 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8715 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8716 			    & FEXTNVM6_K1_OFF_ENABLE)
   8717 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8718 			else
   8719 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8720 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8721 		}
   8722 	} else if (icr & ICR_RXSEQ) {
    8723 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8724 			device_xname(sc->sc_dev)));
   8725 	}
   8726 }
   8727 
   8728 /*
   8729  * wm_linkintr_tbi:
   8730  *
   8731  *	Helper; handle link interrupts for TBI mode.
   8732  */
   8733 static void
   8734 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8735 {
   8736 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8737 	uint32_t status;
   8738 
   8739 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8740 		__func__));
   8741 
   8742 	status = CSR_READ(sc, WMREG_STATUS);
   8743 	if (icr & ICR_LSC) {
   8744 		if (status & STATUS_LU) {
   8745 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8746 			    device_xname(sc->sc_dev),
   8747 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8748 			/*
   8749 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8750 			 * so we should update sc->sc_ctrl
   8751 			 */
   8752 
   8753 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8754 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8755 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8756 			if (status & STATUS_FD)
   8757 				sc->sc_tctl |=
   8758 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8759 			else
   8760 				sc->sc_tctl |=
   8761 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8762 			if (sc->sc_ctrl & CTRL_TFCE)
   8763 				sc->sc_fcrtl |= FCRTL_XONE;
   8764 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8765 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8766 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8767 				      sc->sc_fcrtl);
   8768 			sc->sc_tbi_linkup = 1;
   8769 			if_link_state_change(ifp, LINK_STATE_UP);
   8770 		} else {
   8771 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8772 			    device_xname(sc->sc_dev)));
   8773 			sc->sc_tbi_linkup = 0;
   8774 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8775 		}
   8776 		/* Update LED */
   8777 		wm_tbi_serdes_set_linkled(sc);
   8778 	} else if (icr & ICR_RXSEQ) {
   8779 		DPRINTF(WM_DEBUG_LINK,
   8780 		    ("%s: LINK: Receive sequence error\n",
   8781 		    device_xname(sc->sc_dev)));
   8782 	}
   8783 }
   8784 
   8785 /*
   8786  * wm_linkintr_serdes:
   8787  *
    8788  *	Helper; handle link interrupts for SERDES mode.
   8789  */
   8790 static void
   8791 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8792 {
   8793 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8794 	struct mii_data *mii = &sc->sc_mii;
   8795 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8796 	uint32_t pcs_adv, pcs_lpab, reg;
   8797 
   8798 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8799 		__func__));
   8800 
   8801 	if (icr & ICR_LSC) {
   8802 		/* Check PCS */
   8803 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8804 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8805 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8806 				device_xname(sc->sc_dev)));
   8807 			mii->mii_media_status |= IFM_ACTIVE;
   8808 			sc->sc_tbi_linkup = 1;
   8809 			if_link_state_change(ifp, LINK_STATE_UP);
   8810 		} else {
   8811 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8812 				device_xname(sc->sc_dev)));
   8813 			mii->mii_media_status |= IFM_NONE;
   8814 			sc->sc_tbi_linkup = 0;
   8815 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8816 			wm_tbi_serdes_set_linkled(sc);
   8817 			return;
   8818 		}
   8819 		mii->mii_media_active |= IFM_1000_SX;
   8820 		if ((reg & PCS_LSTS_FDX) != 0)
   8821 			mii->mii_media_active |= IFM_FDX;
   8822 		else
   8823 			mii->mii_media_active |= IFM_HDX;
   8824 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8825 			/* Check flow */
   8826 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8827 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8828 				DPRINTF(WM_DEBUG_LINK,
   8829 				    ("XXX LINKOK but not ACOMP\n"));
   8830 				return;
   8831 			}
   8832 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8833 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8834 			DPRINTF(WM_DEBUG_LINK,
   8835 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8836 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8837 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8838 				mii->mii_media_active |= IFM_FLOW
   8839 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8840 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8841 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8842 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8843 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8844 				mii->mii_media_active |= IFM_FLOW
   8845 				    | IFM_ETH_TXPAUSE;
   8846 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8847 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8848 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8849 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8850 				mii->mii_media_active |= IFM_FLOW
   8851 				    | IFM_ETH_RXPAUSE;
   8852 		}
   8853 		/* Update LED */
   8854 		wm_tbi_serdes_set_linkled(sc);
   8855 	} else {
   8856 		DPRINTF(WM_DEBUG_LINK,
   8857 		    ("%s: LINK: Receive sequence error\n",
   8858 		    device_xname(sc->sc_dev)));
   8859 	}
   8860 }
   8861 
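         /*
          * For reference, the flow-control resolution in wm_linkintr_serdes()
          * matches the pause resolution of IEEE 802.3 Annex 28B, with SYM the
          * symmetric-pause bit and ASYM the asymmetric-direction bit:
          *
          *	local SYM/ASYM	partner SYM/ASYM	result
          *	  1  /  x	  1  /  x		TX and RX pause
          *	  0  /  1	  1  /  1		TX pause only
          *	  1  /  1	  0  /  1		RX pause only
          *	  otherwise				no pause
          */
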
   8862 /*
   8863  * wm_linkintr:
   8864  *
   8865  *	Helper; handle link interrupts.
   8866  */
   8867 static void
   8868 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8869 {
   8870 
   8871 	KASSERT(WM_CORE_LOCKED(sc));
   8872 
   8873 	if (sc->sc_flags & WM_F_HAS_MII)
   8874 		wm_linkintr_gmii(sc, icr);
   8875 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8876 	    && (sc->sc_type >= WM_T_82575))
   8877 		wm_linkintr_serdes(sc, icr);
   8878 	else
   8879 		wm_linkintr_tbi(sc, icr);
   8880 }
   8881 
   8882 /*
   8883  * wm_intr_legacy:
   8884  *
   8885  *	Interrupt service routine for INTx and MSI.
   8886  */
   8887 static int
   8888 wm_intr_legacy(void *arg)
   8889 {
   8890 	struct wm_softc *sc = arg;
   8891 	struct wm_queue *wmq = &sc->sc_queue[0];
   8892 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8893 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8894 	uint32_t icr, rndval = 0;
   8895 	int handled = 0;
   8896 
   8897 	while (1 /* CONSTCOND */) {
   8898 		icr = CSR_READ(sc, WMREG_ICR);
   8899 		if ((icr & sc->sc_icr) == 0)
   8900 			break;
   8901 		if (handled == 0) {
   8902 			DPRINTF(WM_DEBUG_TX,
    8903 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8904 		}
   8905 		if (rndval == 0)
   8906 			rndval = icr;
   8907 
   8908 		mutex_enter(rxq->rxq_lock);
   8909 
   8910 		if (rxq->rxq_stopping) {
   8911 			mutex_exit(rxq->rxq_lock);
   8912 			break;
   8913 		}
   8914 
   8915 		handled = 1;
   8916 
   8917 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8918 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8919 			DPRINTF(WM_DEBUG_RX,
   8920 			    ("%s: RX: got Rx intr 0x%08x\n",
   8921 			    device_xname(sc->sc_dev),
   8922 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8923 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8924 		}
   8925 #endif
   8926 		/*
   8927 		 * wm_rxeof() does *not* call upper layer functions directly,
    8928 		 * as if_percpuq_enqueue() just calls softint_schedule().
   8929 		 * So, we can call wm_rxeof() in interrupt context.
   8930 		 */
   8931 		wm_rxeof(rxq, UINT_MAX);
   8932 
   8933 		mutex_exit(rxq->rxq_lock);
   8934 		mutex_enter(txq->txq_lock);
   8935 
   8936 		if (txq->txq_stopping) {
   8937 			mutex_exit(txq->txq_lock);
   8938 			break;
   8939 		}
   8940 
   8941 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8942 		if (icr & ICR_TXDW) {
   8943 			DPRINTF(WM_DEBUG_TX,
   8944 			    ("%s: TX: got TXDW interrupt\n",
   8945 			    device_xname(sc->sc_dev)));
   8946 			WM_Q_EVCNT_INCR(txq, txdw);
   8947 		}
   8948 #endif
   8949 		wm_txeof(txq, UINT_MAX);
   8950 
   8951 		mutex_exit(txq->txq_lock);
   8952 		WM_CORE_LOCK(sc);
   8953 
   8954 		if (sc->sc_core_stopping) {
   8955 			WM_CORE_UNLOCK(sc);
   8956 			break;
   8957 		}
   8958 
   8959 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8960 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8961 			wm_linkintr(sc, icr);
   8962 		}
   8963 
   8964 		WM_CORE_UNLOCK(sc);
   8965 
   8966 		if (icr & ICR_RXO) {
   8967 #if defined(WM_DEBUG)
   8968 			log(LOG_WARNING, "%s: Receive overrun\n",
   8969 			    device_xname(sc->sc_dev));
   8970 #endif /* defined(WM_DEBUG) */
   8971 		}
   8972 	}
   8973 
   8974 	rnd_add_uint32(&sc->rnd_source, rndval);
   8975 
   8976 	if (handled) {
   8977 		/* Try to get more packets going. */
   8978 		softint_schedule(wmq->wmq_si);
   8979 	}
   8980 
   8981 	return handled;
   8982 }
   8983 
   8984 static inline void
   8985 wm_txrxintr_disable(struct wm_queue *wmq)
   8986 {
   8987 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8988 
   8989 	if (sc->sc_type == WM_T_82574)
   8990 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8991 	else if (sc->sc_type == WM_T_82575)
   8992 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8993 	else
   8994 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8995 }
   8996 
   8997 static inline void
   8998 wm_txrxintr_enable(struct wm_queue *wmq)
   8999 {
   9000 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9001 
   9002 	wm_itrs_calculate(sc, wmq);
   9003 
   9004 	/*
    9005 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9006 	 * here.  There is no need to care which of RXQ(0) and RXQ(1) enables
    9007 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
    9008 	 * its wm_handle_queue(wmq) is running.
   9009 	 */
   9010 	if (sc->sc_type == WM_T_82574)
   9011 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9012 	else if (sc->sc_type == WM_T_82575)
   9013 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9014 	else
   9015 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9016 }
   9017 
   9018 static int
   9019 wm_txrxintr_msix(void *arg)
   9020 {
   9021 	struct wm_queue *wmq = arg;
   9022 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9023 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9024 	struct wm_softc *sc = txq->txq_sc;
   9025 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9026 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9027 	bool txmore;
   9028 	bool rxmore;
   9029 
   9030 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9031 
   9032 	DPRINTF(WM_DEBUG_TX,
   9033 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9034 
   9035 	wm_txrxintr_disable(wmq);
   9036 
   9037 	mutex_enter(txq->txq_lock);
   9038 
   9039 	if (txq->txq_stopping) {
   9040 		mutex_exit(txq->txq_lock);
   9041 		return 0;
   9042 	}
   9043 
   9044 	WM_Q_EVCNT_INCR(txq, txdw);
   9045 	txmore = wm_txeof(txq, txlimit);
    9046 	/* wm_deferred_start_locked() is called from wm_handle_queue(). */
   9047 	mutex_exit(txq->txq_lock);
   9048 
   9049 	DPRINTF(WM_DEBUG_RX,
   9050 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9051 	mutex_enter(rxq->rxq_lock);
   9052 
   9053 	if (rxq->rxq_stopping) {
   9054 		mutex_exit(rxq->rxq_lock);
   9055 		return 0;
   9056 	}
   9057 
   9058 	WM_Q_EVCNT_INCR(rxq, rxintr);
   9059 	rxmore = wm_rxeof(rxq, rxlimit);
   9060 	mutex_exit(rxq->rxq_lock);
   9061 
   9062 	wm_itrs_writereg(sc, wmq);
   9063 
   9064 	if (txmore || rxmore)
   9065 		softint_schedule(wmq->wmq_si);
   9066 	else
   9067 		wm_txrxintr_enable(wmq);
   9068 
   9069 	return 1;
   9070 }
   9071 
   9072 static void
   9073 wm_handle_queue(void *arg)
   9074 {
   9075 	struct wm_queue *wmq = arg;
   9076 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9077 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9078 	struct wm_softc *sc = txq->txq_sc;
   9079 	u_int txlimit = sc->sc_tx_process_limit;
   9080 	u_int rxlimit = sc->sc_rx_process_limit;
   9081 	bool txmore;
   9082 	bool rxmore;
   9083 
   9084 	mutex_enter(txq->txq_lock);
   9085 	if (txq->txq_stopping) {
   9086 		mutex_exit(txq->txq_lock);
   9087 		return;
   9088 	}
   9089 	txmore = wm_txeof(txq, txlimit);
   9090 	wm_deferred_start_locked(txq);
   9091 	mutex_exit(txq->txq_lock);
   9092 
   9093 	mutex_enter(rxq->rxq_lock);
   9094 	if (rxq->rxq_stopping) {
   9095 		mutex_exit(rxq->rxq_lock);
   9096 		return;
   9097 	}
   9098 	WM_Q_EVCNT_INCR(rxq, rxdefer);
   9099 	rxmore = wm_rxeof(rxq, rxlimit);
   9100 	mutex_exit(rxq->rxq_lock);
   9101 
   9102 	if (txmore || rxmore)
   9103 		softint_schedule(wmq->wmq_si);
   9104 	else
   9105 		wm_txrxintr_enable(wmq);
   9106 }
   9107 
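         /*
          * A note on the handoff implemented above: wm_txrxintr_msix()
          * processes up to sc_{tx,rx}_intr_process_limit entries in hard
          * interrupt context and defers the remainder to this softint, which
          * processes up to sc_{tx,rx}_process_limit entries per pass and
          * re-schedules itself while work remains; the queue interrupt is
          * re-enabled only once both rings are drained.
          */
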
   9108 /*
   9109  * wm_linkintr_msix:
   9110  *
   9111  *	Interrupt service routine for link status change for MSI-X.
   9112  */
   9113 static int
   9114 wm_linkintr_msix(void *arg)
   9115 {
   9116 	struct wm_softc *sc = arg;
   9117 	uint32_t reg;
   9118 	bool has_rxo;
   9119 
   9120 	DPRINTF(WM_DEBUG_LINK,
   9121 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9122 
   9123 	reg = CSR_READ(sc, WMREG_ICR);
   9124 	WM_CORE_LOCK(sc);
   9125 	if (sc->sc_core_stopping)
   9126 		goto out;
   9127 
    9128 	if ((reg & ICR_LSC) != 0) {
   9129 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9130 		wm_linkintr(sc, ICR_LSC);
   9131 	}
   9132 
   9133 	/*
   9134 	 * XXX 82574 MSI-X mode workaround
   9135 	 *
    9136 	 * 82574 MSI-X mode signals a receive overrun (RXO) on the ICR_OTHER
    9137 	 * MSI-X vector; furthermore, it raises neither the ICR_RXQ(0) nor
    9138 	 * the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9139 	 * interrupts by writing WMREG_ICS to process receive packets.
   9140 	 */
   9141 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9142 #if defined(WM_DEBUG)
   9143 		log(LOG_WARNING, "%s: Receive overrun\n",
   9144 		    device_xname(sc->sc_dev));
   9145 #endif /* defined(WM_DEBUG) */
   9146 
   9147 		has_rxo = true;
   9148 		/*
   9149 		 * The RXO interrupt is very high rate when receive traffic is
   9150 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   9151 		 * interrupts. ICR_OTHER will be enabled at the end of
   9152 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9153 		 * ICR_RXQ(1) interrupts.
   9154 		 */
   9155 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9156 
   9157 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9158 	}
    9159 
   9162 out:
   9163 	WM_CORE_UNLOCK(sc);
   9164 
   9165 	if (sc->sc_type == WM_T_82574) {
   9166 		if (!has_rxo)
   9167 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9168 		else
   9169 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9170 	} else if (sc->sc_type == WM_T_82575)
   9171 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9172 	else
   9173 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9174 
   9175 	return 1;
   9176 }
   9177 
   9178 /*
   9179  * Media related.
   9180  * GMII, SGMII, TBI (and SERDES)
   9181  */
   9182 
   9183 /* Common */
   9184 
   9185 /*
   9186  * wm_tbi_serdes_set_linkled:
   9187  *
   9188  *	Update the link LED on TBI and SERDES devices.
   9189  */
   9190 static void
   9191 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9192 {
   9193 
   9194 	if (sc->sc_tbi_linkup)
   9195 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9196 	else
   9197 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9198 
   9199 	/* 82540 or newer devices are active low */
   9200 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9201 
   9202 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9203 }
   9204 
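         /*
          * Worked example of the XOR above: on an 82540 or newer part the LED
          * is active low, so with link up CTRL_SWDPIN(0) is first set and then
          * inverted to zero, driving the pin low and lighting the LED; with
          * link down the pin is driven high and the LED stays off.
          */
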
   9205 /* GMII related */
   9206 
   9207 /*
   9208  * wm_gmii_reset:
   9209  *
   9210  *	Reset the PHY.
   9211  */
   9212 static void
   9213 wm_gmii_reset(struct wm_softc *sc)
   9214 {
   9215 	uint32_t reg;
   9216 	int rv;
   9217 
   9218 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9219 		device_xname(sc->sc_dev), __func__));
   9220 
   9221 	rv = sc->phy.acquire(sc);
   9222 	if (rv != 0) {
   9223 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9224 		    __func__);
   9225 		return;
   9226 	}
   9227 
   9228 	switch (sc->sc_type) {
   9229 	case WM_T_82542_2_0:
   9230 	case WM_T_82542_2_1:
   9231 		/* null */
   9232 		break;
   9233 	case WM_T_82543:
   9234 		/*
   9235 		 * With 82543, we need to force speed and duplex on the MAC
   9236 		 * equal to what the PHY speed and duplex configuration is.
   9237 		 * In addition, we need to perform a hardware reset on the PHY
   9238 		 * to take it out of reset.
   9239 		 */
   9240 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9241 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9242 
   9243 		/* The PHY reset pin is active-low. */
   9244 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9245 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9246 		    CTRL_EXT_SWDPIN(4));
   9247 		reg |= CTRL_EXT_SWDPIO(4);
   9248 
   9249 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9250 		CSR_WRITE_FLUSH(sc);
   9251 		delay(10*1000);
   9252 
   9253 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9254 		CSR_WRITE_FLUSH(sc);
   9255 		delay(150);
   9256 #if 0
   9257 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9258 #endif
   9259 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9260 		break;
   9261 	case WM_T_82544:	/* reset 10000us */
   9262 	case WM_T_82540:
   9263 	case WM_T_82545:
   9264 	case WM_T_82545_3:
   9265 	case WM_T_82546:
   9266 	case WM_T_82546_3:
   9267 	case WM_T_82541:
   9268 	case WM_T_82541_2:
   9269 	case WM_T_82547:
   9270 	case WM_T_82547_2:
   9271 	case WM_T_82571:	/* reset 100us */
   9272 	case WM_T_82572:
   9273 	case WM_T_82573:
   9274 	case WM_T_82574:
   9275 	case WM_T_82575:
   9276 	case WM_T_82576:
   9277 	case WM_T_82580:
   9278 	case WM_T_I350:
   9279 	case WM_T_I354:
   9280 	case WM_T_I210:
   9281 	case WM_T_I211:
   9282 	case WM_T_82583:
   9283 	case WM_T_80003:
   9284 		/* generic reset */
   9285 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9286 		CSR_WRITE_FLUSH(sc);
   9287 		delay(20000);
   9288 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9289 		CSR_WRITE_FLUSH(sc);
   9290 		delay(20000);
   9291 
   9292 		if ((sc->sc_type == WM_T_82541)
   9293 		    || (sc->sc_type == WM_T_82541_2)
   9294 		    || (sc->sc_type == WM_T_82547)
   9295 		    || (sc->sc_type == WM_T_82547_2)) {
    9296 			/* Workarounds for IGP are done in igp_reset() */
   9297 			/* XXX add code to set LED after phy reset */
   9298 		}
   9299 		break;
   9300 	case WM_T_ICH8:
   9301 	case WM_T_ICH9:
   9302 	case WM_T_ICH10:
   9303 	case WM_T_PCH:
   9304 	case WM_T_PCH2:
   9305 	case WM_T_PCH_LPT:
   9306 	case WM_T_PCH_SPT:
   9307 	case WM_T_PCH_CNP:
   9308 		/* generic reset */
   9309 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9310 		CSR_WRITE_FLUSH(sc);
   9311 		delay(100);
   9312 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9313 		CSR_WRITE_FLUSH(sc);
   9314 		delay(150);
   9315 		break;
   9316 	default:
   9317 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9318 		    __func__);
   9319 		break;
   9320 	}
   9321 
   9322 	sc->phy.release(sc);
   9323 
   9324 	/* get_cfg_done */
   9325 	wm_get_cfg_done(sc);
   9326 
   9327 	/* extra setup */
   9328 	switch (sc->sc_type) {
   9329 	case WM_T_82542_2_0:
   9330 	case WM_T_82542_2_1:
   9331 	case WM_T_82543:
   9332 	case WM_T_82544:
   9333 	case WM_T_82540:
   9334 	case WM_T_82545:
   9335 	case WM_T_82545_3:
   9336 	case WM_T_82546:
   9337 	case WM_T_82546_3:
   9338 	case WM_T_82541_2:
   9339 	case WM_T_82547_2:
   9340 	case WM_T_82571:
   9341 	case WM_T_82572:
   9342 	case WM_T_82573:
   9343 	case WM_T_82574:
   9344 	case WM_T_82583:
   9345 	case WM_T_82575:
   9346 	case WM_T_82576:
   9347 	case WM_T_82580:
   9348 	case WM_T_I350:
   9349 	case WM_T_I354:
   9350 	case WM_T_I210:
   9351 	case WM_T_I211:
   9352 	case WM_T_80003:
   9353 		/* null */
   9354 		break;
   9355 	case WM_T_82541:
   9356 	case WM_T_82547:
    9357 		/* XXX Configure the activity LED after PHY reset */
   9358 		break;
   9359 	case WM_T_ICH8:
   9360 	case WM_T_ICH9:
   9361 	case WM_T_ICH10:
   9362 	case WM_T_PCH:
   9363 	case WM_T_PCH2:
   9364 	case WM_T_PCH_LPT:
   9365 	case WM_T_PCH_SPT:
   9366 	case WM_T_PCH_CNP:
   9367 		wm_phy_post_reset(sc);
   9368 		break;
   9369 	default:
   9370 		panic("%s: unknown type\n", __func__);
   9371 		break;
   9372 	}
   9373 }
   9374 
   9375 /*
    9376  * Set up sc_phytype and mii_{read|write}reg.
    9377  *
    9378  *  To identify the PHY type, the correct read/write functions must be
    9379  * selected.  To select the correct read/write functions, the PCI ID or
    9380  * MAC type is required, without accessing PHY registers.
    9381  *
    9382  *  On the first call of this function, the PHY ID is not yet known.
    9383  * Check the PCI ID or MAC type.  The list of PCI IDs may not be
    9384  * perfect, so the result might be incorrect.
    9385  *
    9386  *  On the second call, the PHY OUI and model are used to identify the
    9387  * PHY type.  The result might still not be perfect because of missing
    9388  * comparison entries, but it will be better than the first call's.
    9389  *
    9390  *  If the newly detected result differs from the previous assumption,
    9391  * a diagnostic message will be printed.
   9392  */
   9393 static void
   9394 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9395     uint16_t phy_model)
   9396 {
   9397 	device_t dev = sc->sc_dev;
   9398 	struct mii_data *mii = &sc->sc_mii;
   9399 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9400 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9401 	mii_readreg_t new_readreg;
   9402 	mii_writereg_t new_writereg;
   9403 
   9404 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9405 		device_xname(sc->sc_dev), __func__));
   9406 
   9407 	if (mii->mii_readreg == NULL) {
   9408 		/*
   9409 		 *  This is the first call of this function. For ICH and PCH
   9410 		 * variants, it's difficult to determine the PHY access method
   9411 		 * by sc_type, so use the PCI product ID for some devices.
   9412 		 */
   9413 
   9414 		switch (sc->sc_pcidevid) {
   9415 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9416 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9417 			/* 82577 */
   9418 			new_phytype = WMPHY_82577;
   9419 			break;
   9420 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9421 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9422 			/* 82578 */
   9423 			new_phytype = WMPHY_82578;
   9424 			break;
   9425 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9426 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9427 			/* 82579 */
   9428 			new_phytype = WMPHY_82579;
   9429 			break;
   9430 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9431 		case PCI_PRODUCT_INTEL_82801I_BM:
   9432 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9433 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9434 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9435 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9436 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9437 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9438 			/* ICH8, 9, 10 with 82567 */
   9439 			new_phytype = WMPHY_BM;
   9440 			break;
   9441 		default:
   9442 			break;
   9443 		}
   9444 	} else {
   9445 		/* It's not the first call. Use PHY OUI and model */
   9446 		switch (phy_oui) {
   9447 		case MII_OUI_ATHEROS: /* XXX ??? */
   9448 			switch (phy_model) {
   9449 			case 0x0004: /* XXX */
   9450 				new_phytype = WMPHY_82578;
   9451 				break;
   9452 			default:
   9453 				break;
   9454 			}
   9455 			break;
   9456 		case MII_OUI_xxMARVELL:
   9457 			switch (phy_model) {
   9458 			case MII_MODEL_xxMARVELL_I210:
   9459 				new_phytype = WMPHY_I210;
   9460 				break;
   9461 			case MII_MODEL_xxMARVELL_E1011:
   9462 			case MII_MODEL_xxMARVELL_E1000_3:
   9463 			case MII_MODEL_xxMARVELL_E1000_5:
   9464 			case MII_MODEL_xxMARVELL_E1112:
   9465 				new_phytype = WMPHY_M88;
   9466 				break;
   9467 			case MII_MODEL_xxMARVELL_E1149:
   9468 				new_phytype = WMPHY_BM;
   9469 				break;
   9470 			case MII_MODEL_xxMARVELL_E1111:
   9471 			case MII_MODEL_xxMARVELL_I347:
   9472 			case MII_MODEL_xxMARVELL_E1512:
   9473 			case MII_MODEL_xxMARVELL_E1340M:
   9474 			case MII_MODEL_xxMARVELL_E1543:
   9475 				new_phytype = WMPHY_M88;
   9476 				break;
   9477 			case MII_MODEL_xxMARVELL_I82563:
   9478 				new_phytype = WMPHY_GG82563;
   9479 				break;
   9480 			default:
   9481 				break;
   9482 			}
   9483 			break;
   9484 		case MII_OUI_INTEL:
   9485 			switch (phy_model) {
   9486 			case MII_MODEL_INTEL_I82577:
   9487 				new_phytype = WMPHY_82577;
   9488 				break;
   9489 			case MII_MODEL_INTEL_I82579:
   9490 				new_phytype = WMPHY_82579;
   9491 				break;
   9492 			case MII_MODEL_INTEL_I217:
   9493 				new_phytype = WMPHY_I217;
   9494 				break;
   9495 			case MII_MODEL_INTEL_I82580:
   9496 			case MII_MODEL_INTEL_I350:
   9497 				new_phytype = WMPHY_82580;
   9498 				break;
   9499 			default:
   9500 				break;
   9501 			}
   9502 			break;
   9503 		case MII_OUI_yyINTEL:
   9504 			switch (phy_model) {
   9505 			case MII_MODEL_yyINTEL_I82562G:
   9506 			case MII_MODEL_yyINTEL_I82562EM:
   9507 			case MII_MODEL_yyINTEL_I82562ET:
   9508 				new_phytype = WMPHY_IFE;
   9509 				break;
   9510 			case MII_MODEL_yyINTEL_IGP01E1000:
   9511 				new_phytype = WMPHY_IGP;
   9512 				break;
   9513 			case MII_MODEL_yyINTEL_I82566:
   9514 				new_phytype = WMPHY_IGP_3;
   9515 				break;
   9516 			default:
   9517 				break;
   9518 			}
   9519 			break;
   9520 		default:
   9521 			break;
   9522 		}
   9523 		if (new_phytype == WMPHY_UNKNOWN)
   9524 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9525 			    __func__);
   9526 
   9527 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9528 		    && (sc->sc_phytype != new_phytype)) {
    9529 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9530 			    "was incorrect. PHY type from PHY ID = %u\n",
   9531 			    sc->sc_phytype, new_phytype);
   9532 		}
   9533 	}
   9534 
   9535 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9536 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9537 		/* SGMII */
   9538 		new_readreg = wm_sgmii_readreg;
   9539 		new_writereg = wm_sgmii_writereg;
    9540 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9541 		/* BM2 (phyaddr == 1) */
   9542 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9543 		    && (new_phytype != WMPHY_BM)
   9544 		    && (new_phytype != WMPHY_UNKNOWN))
   9545 			doubt_phytype = new_phytype;
   9546 		new_phytype = WMPHY_BM;
   9547 		new_readreg = wm_gmii_bm_readreg;
   9548 		new_writereg = wm_gmii_bm_writereg;
   9549 	} else if (sc->sc_type >= WM_T_PCH) {
   9550 		/* All PCH* use _hv_ */
   9551 		new_readreg = wm_gmii_hv_readreg;
   9552 		new_writereg = wm_gmii_hv_writereg;
   9553 	} else if (sc->sc_type >= WM_T_ICH8) {
   9554 		/* non-82567 ICH8, 9 and 10 */
   9555 		new_readreg = wm_gmii_i82544_readreg;
   9556 		new_writereg = wm_gmii_i82544_writereg;
   9557 	} else if (sc->sc_type >= WM_T_80003) {
   9558 		/* 80003 */
   9559 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9560 		    && (new_phytype != WMPHY_GG82563)
   9561 		    && (new_phytype != WMPHY_UNKNOWN))
   9562 			doubt_phytype = new_phytype;
   9563 		new_phytype = WMPHY_GG82563;
   9564 		new_readreg = wm_gmii_i80003_readreg;
   9565 		new_writereg = wm_gmii_i80003_writereg;
   9566 	} else if (sc->sc_type >= WM_T_I210) {
   9567 		/* I210 and I211 */
   9568 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9569 		    && (new_phytype != WMPHY_I210)
   9570 		    && (new_phytype != WMPHY_UNKNOWN))
   9571 			doubt_phytype = new_phytype;
   9572 		new_phytype = WMPHY_I210;
   9573 		new_readreg = wm_gmii_gs40g_readreg;
   9574 		new_writereg = wm_gmii_gs40g_writereg;
   9575 	} else if (sc->sc_type >= WM_T_82580) {
   9576 		/* 82580, I350 and I354 */
   9577 		new_readreg = wm_gmii_82580_readreg;
   9578 		new_writereg = wm_gmii_82580_writereg;
   9579 	} else if (sc->sc_type >= WM_T_82544) {
    9580 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9581 		new_readreg = wm_gmii_i82544_readreg;
   9582 		new_writereg = wm_gmii_i82544_writereg;
   9583 	} else {
   9584 		new_readreg = wm_gmii_i82543_readreg;
   9585 		new_writereg = wm_gmii_i82543_writereg;
   9586 	}
   9587 
   9588 	if (new_phytype == WMPHY_BM) {
   9589 		/* All BM use _bm_ */
   9590 		new_readreg = wm_gmii_bm_readreg;
   9591 		new_writereg = wm_gmii_bm_writereg;
   9592 	}
   9593 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9594 		/* All PCH* use _hv_ */
   9595 		new_readreg = wm_gmii_hv_readreg;
   9596 		new_writereg = wm_gmii_hv_writereg;
   9597 	}
   9598 
   9599 	/* Diag output */
   9600 	if (doubt_phytype != WMPHY_UNKNOWN)
   9601 		aprint_error_dev(dev, "Assumed new PHY type was "
   9602 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9603 		    new_phytype);
   9604 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9605 	    && (sc->sc_phytype != new_phytype))
    9606 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9607 		    " was incorrect. New PHY type = %u\n",
   9608 		    sc->sc_phytype, new_phytype);
   9609 
   9610 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9611 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9612 
   9613 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9614 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9615 		    "function was incorrect.\n");
   9616 
   9617 	/* Update now */
   9618 	sc->sc_phytype = new_phytype;
   9619 	mii->mii_readreg = new_readreg;
   9620 	mii->mii_writereg = new_writereg;
   9621 }
   9622 
   9623 /*
   9624  * wm_get_phy_id_82575:
   9625  *
   9626  * Return PHY ID. Return -1 if it failed.
   9627  */
   9628 static int
   9629 wm_get_phy_id_82575(struct wm_softc *sc)
   9630 {
   9631 	uint32_t reg;
   9632 	int phyid = -1;
   9633 
   9634 	/* XXX */
   9635 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9636 		return -1;
   9637 
   9638 	if (wm_sgmii_uses_mdio(sc)) {
   9639 		switch (sc->sc_type) {
   9640 		case WM_T_82575:
   9641 		case WM_T_82576:
   9642 			reg = CSR_READ(sc, WMREG_MDIC);
   9643 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9644 			break;
   9645 		case WM_T_82580:
   9646 		case WM_T_I350:
   9647 		case WM_T_I354:
   9648 		case WM_T_I210:
   9649 		case WM_T_I211:
   9650 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9651 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9652 			break;
   9653 		default:
   9654 			return -1;
   9655 		}
   9656 	}
   9657 
   9658 	return phyid;
   9659 }
   9660 
   9661 
   9662 /*
   9663  * wm_gmii_mediainit:
   9664  *
   9665  *	Initialize media for use on 1000BASE-T devices.
   9666  */
   9667 static void
   9668 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9669 {
   9670 	device_t dev = sc->sc_dev;
   9671 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9672 	struct mii_data *mii = &sc->sc_mii;
   9673 	uint32_t reg;
   9674 
   9675 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9676 		device_xname(sc->sc_dev), __func__));
   9677 
   9678 	/* We have GMII. */
   9679 	sc->sc_flags |= WM_F_HAS_MII;
   9680 
   9681 	if (sc->sc_type == WM_T_80003)
   9682 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9683 	else
   9684 		sc->sc_tipg = TIPG_1000T_DFLT;
   9685 
   9686 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9687 	if ((sc->sc_type == WM_T_82580)
   9688 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9689 	    || (sc->sc_type == WM_T_I211)) {
   9690 		reg = CSR_READ(sc, WMREG_PHPM);
   9691 		reg &= ~PHPM_GO_LINK_D;
   9692 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9693 	}
   9694 
   9695 	/*
   9696 	 * Let the chip set speed/duplex on its own based on
   9697 	 * signals from the PHY.
   9698 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9699 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9700 	 */
   9701 	sc->sc_ctrl |= CTRL_SLU;
   9702 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9703 
   9704 	/* Initialize our media structures and probe the GMII. */
   9705 	mii->mii_ifp = ifp;
   9706 
   9707 	mii->mii_statchg = wm_gmii_statchg;
   9708 
   9709 	/* get PHY control from SMBus to PCIe */
   9710 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9711 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9712 	    || (sc->sc_type == WM_T_PCH_CNP))
   9713 		wm_smbustopci(sc);
   9714 
   9715 	wm_gmii_reset(sc);
   9716 
   9717 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9718 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9719 	    wm_gmii_mediastatus);
   9720 
   9721 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9722 	    || (sc->sc_type == WM_T_82580)
   9723 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9724 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9725 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9726 			/* Attach only one port */
   9727 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9728 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9729 		} else {
   9730 			int i, id;
   9731 			uint32_t ctrl_ext;
   9732 
   9733 			id = wm_get_phy_id_82575(sc);
   9734 			if (id != -1) {
   9735 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9736 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9737 			}
   9738 			if ((id == -1)
   9739 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9740 				/* Power on the SGMII PHY if it is disabled */
   9741 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9742 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9743 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9744 				CSR_WRITE_FLUSH(sc);
   9745 				delay(300*1000); /* XXX too long */
   9746 
    9747 				/* Try PHY addresses 1 through 7 */
   9748 				for (i = 1; i < 8; i++)
   9749 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9750 					    0xffffffff, i, MII_OFFSET_ANY,
   9751 					    MIIF_DOPAUSE);
   9752 
    9753 				/* Restore the previous SFP cage power state */
   9754 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9755 			}
   9756 		}
   9757 	} else {
   9758 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9759 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9760 	}
   9761 
   9762 	/*
    9763 	 * If the MAC is PCH2 or newer and it failed to detect a MII PHY, call
   9764 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9765 	 */
   9766 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9767 		|| (sc->sc_type == WM_T_PCH_SPT)
   9768 		|| (sc->sc_type == WM_T_PCH_CNP))
   9769 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9770 		wm_set_mdio_slow_mode_hv(sc);
   9771 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9772 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9773 	}
   9774 
   9775 	/*
   9776 	 * (For ICH8 variants)
   9777 	 * If PHY detection failed, use BM's r/w function and retry.
   9778 	 */
   9779 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9780 		/* if failed, retry with *_bm_* */
   9781 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9782 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9783 		    sc->sc_phytype);
   9784 		sc->sc_phytype = WMPHY_BM;
   9785 		mii->mii_readreg = wm_gmii_bm_readreg;
   9786 		mii->mii_writereg = wm_gmii_bm_writereg;
   9787 
   9788 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9789 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9790 	}
   9791 
   9792 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9793 		/* No PHY was found */
   9794 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9795 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9796 		sc->sc_phytype = WMPHY_NONE;
   9797 	} else {
   9798 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9799 
   9800 		/*
    9801 		 * A PHY was found. Check the PHY type again with a second
    9802 		 * call to wm_gmii_setup_phytype().
   9803 		 */
   9804 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9805 		    child->mii_mpd_model);
   9806 
   9807 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9808 	}
   9809 }
   9810 
   9811 /*
   9812  * wm_gmii_mediachange:	[ifmedia interface function]
   9813  *
   9814  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9815  */
   9816 static int
   9817 wm_gmii_mediachange(struct ifnet *ifp)
   9818 {
   9819 	struct wm_softc *sc = ifp->if_softc;
   9820 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9821 	int rc;
   9822 
   9823 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9824 		device_xname(sc->sc_dev), __func__));
   9825 	if ((ifp->if_flags & IFF_UP) == 0)
   9826 		return 0;
   9827 
   9828 	/* Disable D0 LPLU. */
   9829 	wm_lplu_d0_disable(sc);
   9830 
   9831 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9832 	sc->sc_ctrl |= CTRL_SLU;
   9833 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9834 	    || (sc->sc_type > WM_T_82543)) {
   9835 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9836 	} else {
   9837 		sc->sc_ctrl &= ~CTRL_ASDE;
   9838 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9839 		if (ife->ifm_media & IFM_FDX)
   9840 			sc->sc_ctrl |= CTRL_FD;
   9841 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9842 		case IFM_10_T:
   9843 			sc->sc_ctrl |= CTRL_SPEED_10;
   9844 			break;
   9845 		case IFM_100_TX:
   9846 			sc->sc_ctrl |= CTRL_SPEED_100;
   9847 			break;
   9848 		case IFM_1000_T:
   9849 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9850 			break;
   9851 		default:
   9852 			panic("wm_gmii_mediachange: bad media 0x%x",
   9853 			    ife->ifm_media);
   9854 		}
   9855 	}
   9856 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9857 	CSR_WRITE_FLUSH(sc);
   9858 	if (sc->sc_type <= WM_T_82543)
   9859 		wm_gmii_reset(sc);
   9860 
   9861 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9862 		return 0;
   9863 	return rc;
   9864 }
   9865 
   9866 /*
   9867  * wm_gmii_mediastatus:	[ifmedia interface function]
   9868  *
   9869  *	Get the current interface media status on a 1000BASE-T device.
   9870  */
   9871 static void
   9872 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9873 {
   9874 	struct wm_softc *sc = ifp->if_softc;
   9875 
   9876 	ether_mediastatus(ifp, ifmr);
   9877 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9878 	    | sc->sc_flowflags;
   9879 }
   9880 
   9881 #define	MDI_IO		CTRL_SWDPIN(2)
   9882 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9883 #define	MDI_CLK		CTRL_SWDPIN(3)
   9884 
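         /*
          * On the i82543 the MII is not accessed through the MDIC register.
          * Instead, the MDIO lines are bit-banged via the software-definable
          * pins: data on SWDPIN(2), clock on SWDPIN(3). The two helpers below
          * shift bits out and in, toggling MDI_CLK with ~10us settle delays.
          */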
   9885 static void
   9886 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9887 {
   9888 	uint32_t i, v;
   9889 
   9890 	v = CSR_READ(sc, WMREG_CTRL);
   9891 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9892 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9893 
   9894 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9895 		if (data & i)
   9896 			v |= MDI_IO;
   9897 		else
   9898 			v &= ~MDI_IO;
   9899 		CSR_WRITE(sc, WMREG_CTRL, v);
   9900 		CSR_WRITE_FLUSH(sc);
   9901 		delay(10);
   9902 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9903 		CSR_WRITE_FLUSH(sc);
   9904 		delay(10);
   9905 		CSR_WRITE(sc, WMREG_CTRL, v);
   9906 		CSR_WRITE_FLUSH(sc);
   9907 		delay(10);
   9908 	}
   9909 }
   9910 
   9911 static uint32_t
   9912 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9913 {
   9914 	uint32_t v, i, data = 0;
   9915 
   9916 	v = CSR_READ(sc, WMREG_CTRL);
   9917 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9918 	v |= CTRL_SWDPIO(3);
   9919 
   9920 	CSR_WRITE(sc, WMREG_CTRL, v);
   9921 	CSR_WRITE_FLUSH(sc);
   9922 	delay(10);
   9923 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9924 	CSR_WRITE_FLUSH(sc);
   9925 	delay(10);
   9926 	CSR_WRITE(sc, WMREG_CTRL, v);
   9927 	CSR_WRITE_FLUSH(sc);
   9928 	delay(10);
   9929 
   9930 	for (i = 0; i < 16; i++) {
   9931 		data <<= 1;
   9932 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9933 		CSR_WRITE_FLUSH(sc);
   9934 		delay(10);
   9935 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9936 			data |= 1;
   9937 		CSR_WRITE(sc, WMREG_CTRL, v);
   9938 		CSR_WRITE_FLUSH(sc);
   9939 		delay(10);
   9940 	}
   9941 
   9942 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9943 	CSR_WRITE_FLUSH(sc);
   9944 	delay(10);
   9945 	CSR_WRITE(sc, WMREG_CTRL, v);
   9946 	CSR_WRITE_FLUSH(sc);
   9947 	delay(10);
   9948 
   9949 	return data;
   9950 }
   9951 
   9952 #undef MDI_IO
   9953 #undef MDI_DIR
   9954 #undef MDI_CLK
   9955 
   9956 /*
   9957  * wm_gmii_i82543_readreg:	[mii interface function]
   9958  *
   9959  *	Read a PHY register on the GMII (i82543 version).
   9960  */
   9961 static int
   9962 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9963 {
   9964 	struct wm_softc *sc = device_private(dev);
   9965 	int rv;
   9966 
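         	/*
         	 * Clock out the 32-bit all-ones preamble, then the 14-bit
         	 * read frame: start code, read opcode, PHY address and
         	 * register number; the 16 data bits are clocked back in.
         	 */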
   9967 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9968 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9969 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9970 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9971 
   9972 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9973 	    device_xname(dev), phy, reg, rv));
   9974 
   9975 	return rv;
   9976 }
   9977 
   9978 /*
   9979  * wm_gmii_i82543_writereg:	[mii interface function]
   9980  *
   9981  *	Write a PHY register on the GMII (i82543 version).
   9982  */
   9983 static void
   9984 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9985 {
   9986 	struct wm_softc *sc = device_private(dev);
   9987 
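         	/*
         	 * Clock out the 32-bit all-ones preamble, then the full
         	 * 32-bit write frame: start code, write opcode, PHY address,
         	 * register number, turnaround (ACK) and the 16 data bits.
         	 */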
   9988 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9989 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9990 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9991 	    (MII_COMMAND_START << 30), 32);
   9992 }
   9993 
   9994 /*
   9995  * wm_gmii_mdic_readreg:	[mii interface function]
   9996  *
   9997  *	Read a PHY register on the GMII.
   9998  */
   9999 static int
   10000 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10001 {
   10002 	struct wm_softc *sc = device_private(dev);
   10003 	uint32_t mdic = 0;
   10004 	int i, rv;
   10005 
   10006 	if (reg > MII_ADDRMASK) {
   10007 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10008 		    __func__, sc->sc_phytype, reg);
   10009 		reg &= MII_ADDRMASK;
   10010 	}
   10011 
   10012 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10013 	    MDIC_REGADD(reg));
   10014 
   10015 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10016 		mdic = CSR_READ(sc, WMREG_MDIC);
   10017 		if (mdic & MDIC_READY)
   10018 			break;
   10019 		delay(50);
   10020 	}
   10021 
   10022 	if ((mdic & MDIC_READY) == 0) {
   10023 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10024 		    device_xname(dev), phy, reg);
   10025 		rv = 0;
   10026 	} else if (mdic & MDIC_E) {
   10027 #if 0 /* This is normal if no PHY is present. */
   10028 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10029 		    device_xname(dev), phy, reg);
   10030 #endif
   10031 		rv = 0;
   10032 	} else {
   10033 		rv = MDIC_DATA(mdic);
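         		/*
         		 * All-ones data usually means that no PHY responded,
         		 * so treat it as a failed read like the cases above.
         		 */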
   10034 		if (rv == 0xffff)
   10035 			rv = 0;
   10036 	}
   10037 
   10038 	return rv;
   10039 }
   10040 
   10041 /*
   10042  * wm_gmii_mdic_writereg:	[mii interface function]
   10043  *
   10044  *	Write a PHY register on the GMII.
   10045  */
   10046 static void
   10047 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10048 {
   10049 	struct wm_softc *sc = device_private(dev);
   10050 	uint32_t mdic = 0;
   10051 	int i;
   10052 
   10053 	if (reg > MII_ADDRMASK) {
   10054 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10055 		    __func__, sc->sc_phytype, reg);
   10056 		reg &= MII_ADDRMASK;
   10057 	}
   10058 
   10059 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10060 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10061 
   10062 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10063 		mdic = CSR_READ(sc, WMREG_MDIC);
   10064 		if (mdic & MDIC_READY)
   10065 			break;
   10066 		delay(50);
   10067 	}
   10068 
   10069 	if ((mdic & MDIC_READY) == 0)
   10070 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10071 		    device_xname(dev), phy, reg);
   10072 	else if (mdic & MDIC_E)
   10073 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10074 		    device_xname(dev), phy, reg);
   10075 }
   10076 
   10077 /*
   10078  * wm_gmii_i82544_readreg:	[mii interface function]
   10079  *
   10080  *	Read a PHY register on the GMII.
   10081  */
   10082 static int
   10083 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10084 {
   10085 	struct wm_softc *sc = device_private(dev);
   10086 	int rv;
   10087 
   10088 	if (sc->phy.acquire(sc)) {
   10089 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10090 		return 0;
   10091 	}
   10092 
   10093 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10094 		switch (sc->sc_phytype) {
   10095 		case WMPHY_IGP:
   10096 		case WMPHY_IGP_2:
   10097 		case WMPHY_IGP_3:
   10098 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   10099 			break;
   10100 		default:
   10101 #ifdef WM_DEBUG
   10102 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10103 			    __func__, sc->sc_phytype, reg);
   10104 #endif
   10105 			break;
   10106 		}
   10107 	}
   10108 
   10109 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10110 	sc->phy.release(sc);
   10111 
   10112 	return rv;
   10113 }
   10114 
   10115 /*
   10116  * wm_gmii_i82544_writereg:	[mii interface function]
   10117  *
   10118  *	Write a PHY register on the GMII.
   10119  */
   10120 static void
   10121 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10122 {
   10123 	struct wm_softc *sc = device_private(dev);
   10124 
   10125 	if (sc->phy.acquire(sc)) {
   10126 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10127 		return;
   10128 	}
   10129 
   10130 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10131 		switch (sc->sc_phytype) {
   10132 		case WMPHY_IGP:
   10133 		case WMPHY_IGP_2:
   10134 		case WMPHY_IGP_3:
   10135 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   10136 			break;
   10137 		default:
   10138 #ifdef WM_DEBUG
    10139 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
    10140 			    __func__, sc->sc_phytype, reg);
   10141 #endif
   10142 			break;
   10143 		}
   10144 	}
   10145 
   10146 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10147 	sc->phy.release(sc);
   10148 }
   10149 
   10150 /*
   10151  * wm_gmii_i80003_readreg:	[mii interface function]
   10152  *
    10153  *	Read a PHY register on the kumeran bus.
    10154  * This could be handled by the PHY layer if we didn't have to lock the
    10155  * resource ...
   10156  */
   10157 static int
   10158 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10159 {
   10160 	struct wm_softc *sc = device_private(dev);
   10161 	int page_select, temp;
   10162 	int rv;
   10163 
   10164 	if (phy != 1) /* only one PHY on kumeran bus */
   10165 		return 0;
   10166 
   10167 	if (sc->phy.acquire(sc)) {
   10168 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10169 		return 0;
   10170 	}
   10171 
   10172 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10173 		page_select = GG82563_PHY_PAGE_SELECT;
   10174 	else {
   10175 		/*
   10176 		 * Use Alternative Page Select register to access registers
   10177 		 * 30 and 31.
   10178 		 */
   10179 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10180 	}
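         	/* The upper bits of 'reg' encode the page number */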
   10181 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10182 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10183 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10184 		/*
    10185 		 * Wait another 200us to work around a bug in the ready bit
    10186 		 * of the MDIC register.
   10187 		 */
   10188 		delay(200);
   10189 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10190 			device_printf(dev, "%s failed\n", __func__);
   10191 			rv = 0; /* XXX */
   10192 			goto out;
   10193 		}
   10194 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10195 		delay(200);
   10196 	} else
   10197 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10198 
   10199 out:
   10200 	sc->phy.release(sc);
   10201 	return rv;
   10202 }
   10203 
   10204 /*
   10205  * wm_gmii_i80003_writereg:	[mii interface function]
   10206  *
    10207  *	Write a PHY register on the kumeran bus.
    10208  * This could be handled by the PHY layer if we didn't have to lock the
    10209  * resource ...
   10210  */
   10211 static void
   10212 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10213 {
   10214 	struct wm_softc *sc = device_private(dev);
   10215 	int page_select, temp;
   10216 
   10217 	if (phy != 1) /* only one PHY on kumeran bus */
   10218 		return;
   10219 
   10220 	if (sc->phy.acquire(sc)) {
   10221 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10222 		return;
   10223 	}
   10224 
   10225 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10226 		page_select = GG82563_PHY_PAGE_SELECT;
   10227 	else {
   10228 		/*
   10229 		 * Use Alternative Page Select register to access registers
   10230 		 * 30 and 31.
   10231 		 */
   10232 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10233 	}
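         	/* The upper bits of 'reg' encode the page number */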
   10234 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10235 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10236 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10237 		/*
    10238 		 * Wait another 200us to work around a bug in the ready bit
    10239 		 * of the MDIC register.
   10240 		 */
   10241 		delay(200);
   10242 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10243 			device_printf(dev, "%s failed\n", __func__);
   10244 			goto out;
   10245 		}
   10246 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10247 		delay(200);
   10248 	} else
   10249 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10250 
   10251 out:
   10252 	sc->phy.release(sc);
   10253 }
   10254 
   10255 /*
   10256  * wm_gmii_bm_readreg:	[mii interface function]
   10257  *
    10258  *	Read a PHY register on the BM PHY.
    10259  * This could be handled by the PHY layer if we didn't have to lock the
    10260  * resource ...
   10261  */
   10262 static int
   10263 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10264 {
   10265 	struct wm_softc *sc = device_private(dev);
   10266 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10267 	uint16_t val;
   10268 	int rv;
   10269 
   10270 	if (sc->phy.acquire(sc)) {
   10271 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10272 		return 0;
   10273 	}
   10274 
   10275 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10276 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10277 		    || (reg == 31)) ? 1 : phy;
   10278 	/* Page 800 works differently than the rest so it has its own func */
   10279 	if (page == BM_WUC_PAGE) {
   10280 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10281 		rv = val;
   10282 		goto release;
   10283 	}
   10284 
   10285 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10286 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10287 		    && (sc->sc_type != WM_T_82583))
   10288 			wm_gmii_mdic_writereg(dev, phy,
   10289 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10290 		else
   10291 			wm_gmii_mdic_writereg(dev, phy,
   10292 			    BME1000_PHY_PAGE_SELECT, page);
   10293 	}
   10294 
   10295 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10296 
   10297 release:
   10298 	sc->phy.release(sc);
   10299 	return rv;
   10300 }
   10301 
   10302 /*
   10303  * wm_gmii_bm_writereg:	[mii interface function]
   10304  *
    10305  *	Write a PHY register on the BM PHY.
    10306  * This could be handled by the PHY layer if we didn't have to lock the
    10307  * resource ...
   10308  */
   10309 static void
   10310 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10311 {
   10312 	struct wm_softc *sc = device_private(dev);
   10313 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10314 
   10315 	if (sc->phy.acquire(sc)) {
   10316 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10317 		return;
   10318 	}
   10319 
   10320 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10321 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10322 		    || (reg == 31)) ? 1 : phy;
   10323 	/* Page 800 works differently than the rest so it has its own func */
   10324 	if (page == BM_WUC_PAGE) {
   10325 		uint16_t tmp;
   10326 
   10327 		tmp = val;
   10328 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10329 		goto release;
   10330 	}
   10331 
   10332 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10333 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10334 		    && (sc->sc_type != WM_T_82583))
   10335 			wm_gmii_mdic_writereg(dev, phy,
   10336 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10337 		else
   10338 			wm_gmii_mdic_writereg(dev, phy,
   10339 			    BME1000_PHY_PAGE_SELECT, page);
   10340 	}
   10341 
   10342 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10343 
   10344 release:
   10345 	sc->phy.release(sc);
   10346 }
   10347 
   10348 static void
   10349 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10350 {
   10351 	struct wm_softc *sc = device_private(dev);
   10352 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10353 	uint16_t wuce, reg;
   10354 
   10355 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10356 		device_xname(dev), __func__));
   10357 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10358 	if (sc->sc_type == WM_T_PCH) {
    10359 		/* XXX The e1000 driver does nothing here... why? */
   10360 	}
   10361 
   10362 	/*
   10363 	 * 1) Enable PHY wakeup register first.
   10364 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10365 	 */
   10366 
   10367 	/* Set page 769 */
   10368 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10369 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10370 
   10371 	/* Read WUCE and save it */
   10372 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10373 
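         	/* Enable wakeup register access; clear the ME/host WU bits */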
   10374 	reg = wuce | BM_WUC_ENABLE_BIT;
   10375 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10376 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10377 
   10378 	/* Select page 800 */
   10379 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10380 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10381 
   10382 	/*
   10383 	 * 2) Access PHY wakeup register.
   10384 	 * See e1000_access_phy_wakeup_reg_bm.
   10385 	 */
   10386 
   10387 	/* Write page 800 */
   10388 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10389 
   10390 	if (rd)
   10391 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10392 	else
   10393 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10394 
   10395 	/*
   10396 	 * 3) Disable PHY wakeup register.
   10397 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10398 	 */
   10399 	/* Set page 769 */
   10400 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10401 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10402 
   10403 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10404 }
   10405 
   10406 /*
   10407  * wm_gmii_hv_readreg:	[mii interface function]
   10408  *
    10409  *	Read a PHY register on the HV (PCH*) PHY.
    10410  * This could be handled by the PHY layer if we didn't have to lock the
    10411  * resource ...
   10412  */
   10413 static int
   10414 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10415 {
   10416 	struct wm_softc *sc = device_private(dev);
   10417 	int rv;
   10418 
   10419 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10420 		device_xname(dev), __func__));
   10421 	if (sc->phy.acquire(sc)) {
   10422 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10423 		return 0;
   10424 	}
   10425 
   10426 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10427 	sc->phy.release(sc);
   10428 	return rv;
   10429 }
   10430 
   10431 static int
   10432 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10433 {
   10434 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10435 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10436 	uint16_t val;
   10437 	int rv;
   10438 
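         	/* Pages 768 and above are only reachable at PHY address 1 */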
   10439 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10440 
   10441 	/* Page 800 works differently than the rest so it has its own func */
   10442 	if (page == BM_WUC_PAGE) {
   10443 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10444 		return val;
   10445 	}
   10446 
   10447 	/*
    10448 	 * Pages below 768 work differently than the rest and would need
    10449 	 * their own function; they are not handled here.
   10450 	 */
   10451 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10452 		printf("gmii_hv_readreg!!!\n");
   10453 		return 0;
   10454 	}
   10455 
   10456 	/*
   10457 	 * XXX I21[789] documents say that the SMBus Address register is at
   10458 	 * PHY address 01, Page 0 (not 768), Register 26.
   10459 	 */
   10460 	if (page == HV_INTC_FC_PAGE_START)
   10461 		page = 0;
   10462 
   10463 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10464 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10465 		    page << BME1000_PAGE_SHIFT);
   10466 	}
   10467 
   10468 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10469 	return rv;
   10470 }
   10471 
   10472 /*
   10473  * wm_gmii_hv_writereg:	[mii interface function]
   10474  *
    10475  *	Write a PHY register on the HV (PCH*) PHY.
    10476  * This could be handled by the PHY layer if we didn't have to lock the
    10477  * resource ...
   10478  */
   10479 static void
   10480 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10481 {
   10482 	struct wm_softc *sc = device_private(dev);
   10483 
   10484 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10485 		device_xname(dev), __func__));
   10486 
   10487 	if (sc->phy.acquire(sc)) {
   10488 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10489 		return;
   10490 	}
   10491 
   10492 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10493 	sc->phy.release(sc);
   10494 }
   10495 
   10496 static void
   10497 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10498 {
   10499 	struct wm_softc *sc = device_private(dev);
   10500 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10501 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10502 
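         	/* Pages 768 and above are only reachable at PHY address 1 */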
   10503 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10504 
   10505 	/* Page 800 works differently than the rest so it has its own func */
   10506 	if (page == BM_WUC_PAGE) {
   10507 		uint16_t tmp;
   10508 
   10509 		tmp = val;
   10510 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10511 		return;
   10512 	}
   10513 
   10514 	/*
    10515 	 * Pages below 768 work differently than the rest and would need
    10516 	 * their own function; they are not handled here.
   10517 	 */
   10518 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10519 		printf("gmii_hv_writereg!!!\n");
   10520 		return;
   10521 	}
   10522 
   10523 	{
   10524 		/*
   10525 		 * XXX I21[789] documents say that the SMBus Address register
   10526 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10527 		 */
   10528 		if (page == HV_INTC_FC_PAGE_START)
   10529 			page = 0;
   10530 
   10531 		/*
   10532 		 * XXX Workaround MDIO accesses being disabled after entering
   10533 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10534 		 * register is set)
   10535 		 */
   10536 		if (sc->sc_phytype == WMPHY_82578) {
   10537 			struct mii_softc *child;
   10538 
   10539 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10540 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10541 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10542 			    && ((val & (1 << 11)) != 0)) {
   10543 				printf("XXX need workaround\n");
   10544 			}
   10545 		}
   10546 
   10547 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10548 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10549 			    page << BME1000_PAGE_SHIFT);
   10550 		}
   10551 	}
   10552 
   10553 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10554 }
   10555 
   10556 /*
   10557  * wm_gmii_82580_readreg:	[mii interface function]
   10558  *
   10559  *	Read a PHY register on the 82580 and I350.
   10560  * This could be handled by the PHY layer if we didn't have to lock the
    10561  * resource ...
   10562  */
   10563 static int
   10564 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10565 {
   10566 	struct wm_softc *sc = device_private(dev);
   10567 	int rv;
   10568 
   10569 	if (sc->phy.acquire(sc) != 0) {
   10570 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10571 		return 0;
   10572 	}
   10573 
   10574 #ifdef DIAGNOSTIC
   10575 	if (reg > MII_ADDRMASK) {
   10576 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10577 		    __func__, sc->sc_phytype, reg);
   10578 		reg &= MII_ADDRMASK;
   10579 	}
   10580 #endif
   10581 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10582 
   10583 	sc->phy.release(sc);
   10584 	return rv;
   10585 }
   10586 
   10587 /*
   10588  * wm_gmii_82580_writereg:	[mii interface function]
   10589  *
   10590  *	Write a PHY register on the 82580 and I350.
   10591  * This could be handled by the PHY layer if we didn't have to lock the
    10592  * resource ...
   10593  */
   10594 static void
   10595 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10596 {
   10597 	struct wm_softc *sc = device_private(dev);
   10598 
   10599 	if (sc->phy.acquire(sc) != 0) {
   10600 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10601 		return;
   10602 	}
   10603 
   10604 #ifdef DIAGNOSTIC
   10605 	if (reg > MII_ADDRMASK) {
   10606 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10607 		    __func__, sc->sc_phytype, reg);
   10608 		reg &= MII_ADDRMASK;
   10609 	}
   10610 #endif
   10611 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10612 
   10613 	sc->phy.release(sc);
   10614 }
   10615 
   10616 /*
   10617  * wm_gmii_gs40g_readreg:	[mii interface function]
   10618  *
    10619  *	Read a PHY register on the I210 and I211.
    10620  * This could be handled by the PHY layer if we didn't have to lock the
    10621  * resource ...
   10622  */
   10623 static int
   10624 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10625 {
   10626 	struct wm_softc *sc = device_private(dev);
   10627 	int page, offset;
   10628 	int rv;
   10629 
   10630 	/* Acquire semaphore */
   10631 	if (sc->phy.acquire(sc)) {
   10632 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10633 		return 0;
   10634 	}
   10635 
   10636 	/* Page select */
   10637 	page = reg >> GS40G_PAGE_SHIFT;
   10638 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10639 
   10640 	/* Read reg */
   10641 	offset = reg & GS40G_OFFSET_MASK;
   10642 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10643 
   10644 	sc->phy.release(sc);
   10645 	return rv;
   10646 }
   10647 
   10648 /*
   10649  * wm_gmii_gs40g_writereg:	[mii interface function]
   10650  *
   10651  *	Write a PHY register on the I210 and I211.
   10652  * This could be handled by the PHY layer if we didn't have to lock the
    10653  * resource ...
   10654  */
   10655 static void
   10656 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10657 {
   10658 	struct wm_softc *sc = device_private(dev);
   10659 	int page, offset;
   10660 
   10661 	/* Acquire semaphore */
   10662 	if (sc->phy.acquire(sc)) {
   10663 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10664 		return;
   10665 	}
   10666 
   10667 	/* Page select */
   10668 	page = reg >> GS40G_PAGE_SHIFT;
   10669 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10670 
   10671 	/* Write reg */
   10672 	offset = reg & GS40G_OFFSET_MASK;
   10673 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10674 
   10675 	/* Release semaphore */
   10676 	sc->phy.release(sc);
   10677 }
   10678 
   10679 /*
   10680  * wm_gmii_statchg:	[mii interface function]
   10681  *
   10682  *	Callback from MII layer when media changes.
   10683  */
   10684 static void
   10685 wm_gmii_statchg(struct ifnet *ifp)
   10686 {
   10687 	struct wm_softc *sc = ifp->if_softc;
   10688 	struct mii_data *mii = &sc->sc_mii;
   10689 
   10690 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10691 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10692 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10693 
   10694 	/*
   10695 	 * Get flow control negotiation result.
   10696 	 */
   10697 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10698 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10699 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10700 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10701 	}
   10702 
   10703 	if (sc->sc_flowflags & IFM_FLOW) {
   10704 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10705 			sc->sc_ctrl |= CTRL_TFCE;
   10706 			sc->sc_fcrtl |= FCRTL_XONE;
   10707 		}
   10708 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10709 			sc->sc_ctrl |= CTRL_RFCE;
   10710 	}
   10711 
   10712 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10713 		DPRINTF(WM_DEBUG_LINK,
   10714 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10715 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10716 	} else {
   10717 		DPRINTF(WM_DEBUG_LINK,
   10718 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10719 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10720 	}
   10721 
   10722 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10723 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10724 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10725 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10726 	if (sc->sc_type == WM_T_80003) {
   10727 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10728 		case IFM_1000_T:
   10729 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10730 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10731 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10732 			break;
   10733 		default:
   10734 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10735 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10736 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10737 			break;
   10738 		}
   10739 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10740 	}
   10741 }
   10742 
   10743 /* kumeran related (80003, ICH* and PCH*) */
   10744 
   10745 /*
   10746  * wm_kmrn_readreg:
   10747  *
   10748  *	Read a kumeran register
   10749  */
   10750 static int
   10751 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10752 {
   10753 	int rv;
   10754 
   10755 	if (sc->sc_type == WM_T_80003)
   10756 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10757 	else
   10758 		rv = sc->phy.acquire(sc);
   10759 	if (rv != 0) {
   10760 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10761 		    __func__);
   10762 		return rv;
   10763 	}
   10764 
   10765 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10766 
   10767 	if (sc->sc_type == WM_T_80003)
   10768 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10769 	else
   10770 		sc->phy.release(sc);
   10771 
   10772 	return rv;
   10773 }
   10774 
   10775 static int
   10776 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10777 {
   10778 
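         	/*
         	 * Writing the offset with the read-enable (REN) bit set
         	 * triggers the read; after a 2us delay the data can be
         	 * read back from the same register.
         	 */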
   10779 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10780 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10781 	    KUMCTRLSTA_REN);
   10782 	CSR_WRITE_FLUSH(sc);
   10783 	delay(2);
   10784 
   10785 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10786 
   10787 	return 0;
   10788 }
   10789 
   10790 /*
   10791  * wm_kmrn_writereg:
   10792  *
   10793  *	Write a kumeran register
   10794  */
   10795 static int
   10796 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10797 {
   10798 	int rv;
   10799 
   10800 	if (sc->sc_type == WM_T_80003)
   10801 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10802 	else
   10803 		rv = sc->phy.acquire(sc);
   10804 	if (rv != 0) {
   10805 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10806 		    __func__);
   10807 		return rv;
   10808 	}
   10809 
   10810 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10811 
   10812 	if (sc->sc_type == WM_T_80003)
   10813 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10814 	else
   10815 		sc->phy.release(sc);
   10816 
   10817 	return rv;
   10818 }
   10819 
   10820 static int
   10821 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10822 {
   10823 
   10824 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10825 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10826 
   10827 	return 0;
   10828 }
   10829 
   10830 /* SGMII related */
   10831 
   10832 /*
   10833  * wm_sgmii_uses_mdio
   10834  *
   10835  * Check whether the transaction is to the internal PHY or the external
   10836  * MDIO interface. Return true if it's MDIO.
   10837  */
   10838 static bool
   10839 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10840 {
   10841 	uint32_t reg;
   10842 	bool ismdio = false;
   10843 
   10844 	switch (sc->sc_type) {
   10845 	case WM_T_82575:
   10846 	case WM_T_82576:
   10847 		reg = CSR_READ(sc, WMREG_MDIC);
   10848 		ismdio = ((reg & MDIC_DEST) != 0);
   10849 		break;
   10850 	case WM_T_82580:
   10851 	case WM_T_I350:
   10852 	case WM_T_I354:
   10853 	case WM_T_I210:
   10854 	case WM_T_I211:
   10855 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10856 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10857 		break;
   10858 	default:
   10859 		break;
   10860 	}
   10861 
   10862 	return ismdio;
   10863 }
   10864 
   10865 /*
   10866  * wm_sgmii_readreg:	[mii interface function]
   10867  *
   10868  *	Read a PHY register on the SGMII
   10869  * This could be handled by the PHY layer if we didn't have to lock the
    10870  * resource ...
   10871  */
   10872 static int
   10873 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10874 {
   10875 	struct wm_softc *sc = device_private(dev);
   10876 	uint32_t i2ccmd;
   10877 	int i, rv;
   10878 
   10879 	if (sc->phy.acquire(sc)) {
   10880 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10881 		return 0;
   10882 	}
   10883 
   10884 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10885 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10886 	    | I2CCMD_OPCODE_READ;
   10887 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10888 
   10889 	/* Poll the ready bit */
   10890 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10891 		delay(50);
   10892 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10893 		if (i2ccmd & I2CCMD_READY)
   10894 			break;
   10895 	}
   10896 	if ((i2ccmd & I2CCMD_READY) == 0)
   10897 		device_printf(dev, "I2CCMD Read did not complete\n");
   10898 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10899 		device_printf(dev, "I2CCMD Error bit set\n");
   10900 
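         	/* The I2C interface returns the two data bytes swapped */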
   10901 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10902 
   10903 	sc->phy.release(sc);
   10904 	return rv;
   10905 }
   10906 
   10907 /*
   10908  * wm_sgmii_writereg:	[mii interface function]
   10909  *
   10910  *	Write a PHY register on the SGMII.
   10911  * This could be handled by the PHY layer if we didn't have to lock the
    10912  * resource ...
   10913  */
   10914 static void
   10915 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10916 {
   10917 	struct wm_softc *sc = device_private(dev);
   10918 	uint32_t i2ccmd;
   10919 	int i;
   10920 	int val_swapped;
   10921 
   10922 	if (sc->phy.acquire(sc) != 0) {
   10923 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10924 		return;
   10925 	}
   10926 	/* Swap the data bytes for the I2C interface */
   10927 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10928 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10929 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10930 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10931 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10932 
   10933 	/* Poll the ready bit */
   10934 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10935 		delay(50);
   10936 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10937 		if (i2ccmd & I2CCMD_READY)
   10938 			break;
   10939 	}
   10940 	if ((i2ccmd & I2CCMD_READY) == 0)
   10941 		device_printf(dev, "I2CCMD Write did not complete\n");
   10942 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10943 		device_printf(dev, "I2CCMD Error bit set\n");
   10944 
   10945 	sc->phy.release(sc);
   10946 }
   10947 
   10948 /* TBI related */
   10949 
   10950 /*
   10951  * wm_tbi_mediainit:
   10952  *
   10953  *	Initialize media for use on 1000BASE-X devices.
   10954  */
   10955 static void
   10956 wm_tbi_mediainit(struct wm_softc *sc)
   10957 {
   10958 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10959 	const char *sep = "";
   10960 
   10961 	if (sc->sc_type < WM_T_82543)
   10962 		sc->sc_tipg = TIPG_WM_DFLT;
   10963 	else
   10964 		sc->sc_tipg = TIPG_LG_DFLT;
   10965 
   10966 	sc->sc_tbi_serdes_anegticks = 5;
   10967 
   10968 	/* Initialize our media structures */
   10969 	sc->sc_mii.mii_ifp = ifp;
   10970 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10971 
   10972 	if ((sc->sc_type >= WM_T_82575)
   10973 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10974 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10975 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10976 	else
   10977 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10978 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10979 
   10980 	/*
   10981 	 * SWD Pins:
   10982 	 *
   10983 	 *	0 = Link LED (output)
   10984 	 *	1 = Loss Of Signal (input)
   10985 	 */
   10986 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10987 
   10988 	/* XXX Perhaps this is only for TBI */
   10989 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10990 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10991 
   10992 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10993 		sc->sc_ctrl &= ~CTRL_LRST;
   10994 
   10995 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10996 
   10997 #define	ADD(ss, mm, dd)							\
   10998 do {									\
   10999 	aprint_normal("%s%s", sep, ss);					\
   11000 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11001 	sep = ", ";							\
   11002 } while (/*CONSTCOND*/0)
   11003 
   11004 	aprint_normal_dev(sc->sc_dev, "");
   11005 
   11006 	if (sc->sc_type == WM_T_I354) {
   11007 		uint32_t status;
   11008 
   11009 		status = CSR_READ(sc, WMREG_STATUS);
   11010 		if (((status & STATUS_2P5_SKU) != 0)
   11011 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11012 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11013 		} else
   11014 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11015 	} else if (sc->sc_type == WM_T_82545) {
   11016 		/* Only 82545 is LX (XXX except SFP) */
   11017 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11018 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11019 	} else {
   11020 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11021 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11022 	}
   11023 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11024 	aprint_normal("\n");
   11025 
   11026 #undef ADD
   11027 
   11028 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11029 }
   11030 
   11031 /*
   11032  * wm_tbi_mediachange:	[ifmedia interface function]
   11033  *
   11034  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11035  */
   11036 static int
   11037 wm_tbi_mediachange(struct ifnet *ifp)
   11038 {
   11039 	struct wm_softc *sc = ifp->if_softc;
   11040 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11041 	uint32_t status;
   11042 	int i;
   11043 
   11044 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11045 		/* XXX need some work for >= 82571 and < 82575 */
   11046 		if (sc->sc_type < WM_T_82575)
   11047 			return 0;
   11048 	}
   11049 
   11050 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11051 	    || (sc->sc_type >= WM_T_82575))
   11052 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11053 
   11054 	sc->sc_ctrl &= ~CTRL_LRST;
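         	/*
         	 * Build the transmit config word: enable autonegotiation and
         	 * advertise the duplex abilities of the selected media.
         	 */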
   11055 	sc->sc_txcw = TXCW_ANE;
   11056 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11057 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11058 	else if (ife->ifm_media & IFM_FDX)
   11059 		sc->sc_txcw |= TXCW_FD;
   11060 	else
   11061 		sc->sc_txcw |= TXCW_HD;
   11062 
   11063 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11064 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11065 
   11066 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11067 		    device_xname(sc->sc_dev), sc->sc_txcw));
   11068 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11069 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11070 	CSR_WRITE_FLUSH(sc);
   11071 	delay(1000);
   11072 
   11073 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   11074 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   11075 
   11076 	/*
   11077 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   11078 	 * optics detect a signal, 0 if they don't.
   11079 	 */
   11080 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   11081 		/* Have signal; wait for the link to come up. */
   11082 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11083 			delay(10000);
   11084 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11085 				break;
   11086 		}
   11087 
   11088 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11089 			    device_xname(sc->sc_dev),i));
   11090 
   11091 		status = CSR_READ(sc, WMREG_STATUS);
   11092 		DPRINTF(WM_DEBUG_LINK,
   11093 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11094 			device_xname(sc->sc_dev),status, STATUS_LU));
   11095 		if (status & STATUS_LU) {
   11096 			/* Link is up. */
   11097 			DPRINTF(WM_DEBUG_LINK,
   11098 			    ("%s: LINK: set media -> link up %s\n",
   11099 			    device_xname(sc->sc_dev),
   11100 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   11101 
   11102 			/*
   11103 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11104 			 * so we should update sc->sc_ctrl
   11105 			 */
   11106 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11107 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11108 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11109 			if (status & STATUS_FD)
   11110 				sc->sc_tctl |=
   11111 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11112 			else
   11113 				sc->sc_tctl |=
   11114 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11115 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11116 				sc->sc_fcrtl |= FCRTL_XONE;
   11117 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11118 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11119 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   11120 				      sc->sc_fcrtl);
   11121 			sc->sc_tbi_linkup = 1;
   11122 		} else {
   11123 			if (i == WM_LINKUP_TIMEOUT)
   11124 				wm_check_for_link(sc);
   11125 			/* Link is down. */
   11126 			DPRINTF(WM_DEBUG_LINK,
   11127 			    ("%s: LINK: set media -> link down\n",
   11128 			    device_xname(sc->sc_dev)));
   11129 			sc->sc_tbi_linkup = 0;
   11130 		}
   11131 	} else {
   11132 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11133 		    device_xname(sc->sc_dev)));
   11134 		sc->sc_tbi_linkup = 0;
   11135 	}
   11136 
   11137 	wm_tbi_serdes_set_linkled(sc);
   11138 
   11139 	return 0;
   11140 }
   11141 
   11142 /*
   11143  * wm_tbi_mediastatus:	[ifmedia interface function]
   11144  *
   11145  *	Get the current interface media status on a 1000BASE-X device.
   11146  */
   11147 static void
   11148 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11149 {
   11150 	struct wm_softc *sc = ifp->if_softc;
   11151 	uint32_t ctrl, status;
   11152 
   11153 	ifmr->ifm_status = IFM_AVALID;
   11154 	ifmr->ifm_active = IFM_ETHER;
   11155 
   11156 	status = CSR_READ(sc, WMREG_STATUS);
   11157 	if ((status & STATUS_LU) == 0) {
   11158 		ifmr->ifm_active |= IFM_NONE;
   11159 		return;
   11160 	}
   11161 
   11162 	ifmr->ifm_status |= IFM_ACTIVE;
   11163 	/* Only 82545 is LX */
   11164 	if (sc->sc_type == WM_T_82545)
   11165 		ifmr->ifm_active |= IFM_1000_LX;
   11166 	else
   11167 		ifmr->ifm_active |= IFM_1000_SX;
   11168 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11169 		ifmr->ifm_active |= IFM_FDX;
   11170 	else
   11171 		ifmr->ifm_active |= IFM_HDX;
   11172 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11173 	if (ctrl & CTRL_RFCE)
   11174 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11175 	if (ctrl & CTRL_TFCE)
   11176 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11177 }
   11178 
   11179 /* XXX TBI only */
   11180 static int
   11181 wm_check_for_link(struct wm_softc *sc)
   11182 {
   11183 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11184 	uint32_t rxcw;
   11185 	uint32_t ctrl;
   11186 	uint32_t status;
   11187 	uint32_t sig;
   11188 
   11189 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11190 		/* XXX need some work for >= 82571 */
   11191 		if (sc->sc_type >= WM_T_82571) {
   11192 			sc->sc_tbi_linkup = 1;
   11193 			return 0;
   11194 		}
   11195 	}
   11196 
   11197 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11198 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11199 	status = CSR_READ(sc, WMREG_STATUS);
   11200 
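         	/*
         	 * On chips newer than the 82544, SWDPIN(1) reads as 1 when the
         	 * optics detect a signal; on the 82544 and older it reads as 0.
         	 */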
   11201 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11202 
   11203 	DPRINTF(WM_DEBUG_LINK,
   11204 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11205 		device_xname(sc->sc_dev), __func__,
   11206 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11207 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11208 
   11209 	/*
   11210 	 * SWDPIN   LU RXCW
   11211 	 *      0    0    0
   11212 	 *      0    0    1	(should not happen)
   11213 	 *      0    1    0	(should not happen)
   11214 	 *      0    1    1	(should not happen)
   11215 	 *      1    0    0	Disable autonego and force linkup
   11216 	 *      1    0    1	got /C/ but not linkup yet
   11217 	 *      1    1    0	(linkup)
   11218 	 *      1    1    1	If IFM_AUTO, back to autonego
   11219 	 *
   11220 	 */
   11221 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11222 	    && ((status & STATUS_LU) == 0)
   11223 	    && ((rxcw & RXCW_C) == 0)) {
   11224 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11225 			__func__));
   11226 		sc->sc_tbi_linkup = 0;
   11227 		/* Disable auto-negotiation in the TXCW register */
   11228 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11229 
   11230 		/*
   11231 		 * Force link-up and also force full-duplex.
   11232 		 *
    11233 		 * NOTE: CTRL will update TFCE and RFCE automatically,
   11234 		 * so we should update sc->sc_ctrl
   11235 		 */
   11236 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11237 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11238 	} else if (((status & STATUS_LU) != 0)
   11239 	    && ((rxcw & RXCW_C) != 0)
   11240 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11241 		sc->sc_tbi_linkup = 1;
   11242 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11243 			__func__));
   11244 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11245 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11246 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11247 	    && ((rxcw & RXCW_C) != 0)) {
   11248 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11249 	} else {
   11250 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11251 			status));
   11252 	}
   11253 
   11254 	return 0;
   11255 }
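          
          /*
           * Worked example for the table above (illustrative): a link partner
           * with autonegotiation disabled yields row "1 0 0" -- signal is
           * present on SWDPIN(1) but neither link-up nor /C/ ordered sets are
           * seen -- so autonegotiation is disabled in TXCW and link-up and
           * full-duplex are forced.  Once /C/ ordered sets appear again
           * (row "1 1 1") and the media is IFM_AUTO, autonegotiation resumes.
           */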
   11256 
   11257 /*
   11258  * wm_tbi_tick:
   11259  *
   11260  *	Check the link on TBI devices.
   11261  *	This function acts as mii_tick().
   11262  */
   11263 static void
   11264 wm_tbi_tick(struct wm_softc *sc)
   11265 {
   11266 	struct mii_data *mii = &sc->sc_mii;
   11267 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11268 	uint32_t status;
   11269 
   11270 	KASSERT(WM_CORE_LOCKED(sc));
   11271 
   11272 	status = CSR_READ(sc, WMREG_STATUS);
   11273 
   11274 	/* XXX is this needed? */
   11275 	(void)CSR_READ(sc, WMREG_RXCW);
   11276 	(void)CSR_READ(sc, WMREG_CTRL);
   11277 
   11278 	/* set link status */
   11279 	if ((status & STATUS_LU) == 0) {
   11280 		DPRINTF(WM_DEBUG_LINK,
   11281 		    ("%s: LINK: checklink -> down\n",
   11282 			device_xname(sc->sc_dev)));
   11283 		sc->sc_tbi_linkup = 0;
   11284 	} else if (sc->sc_tbi_linkup == 0) {
   11285 		DPRINTF(WM_DEBUG_LINK,
   11286 		    ("%s: LINK: checklink -> up %s\n",
   11287 			device_xname(sc->sc_dev),
   11288 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11289 		sc->sc_tbi_linkup = 1;
   11290 		sc->sc_tbi_serdes_ticks = 0;
   11291 	}
   11292 
   11293 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11294 		goto setled;
   11295 
   11296 	if ((status & STATUS_LU) == 0) {
   11297 		sc->sc_tbi_linkup = 0;
   11298 		/* If the timer expired, retry autonegotiation */
   11299 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11300 		    && (++sc->sc_tbi_serdes_ticks
   11301 			>= sc->sc_tbi_serdes_anegticks)) {
   11302 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11303 			sc->sc_tbi_serdes_ticks = 0;
   11304 			/*
   11305 			 * Reset the link, and let autonegotiation do
   11306 			 * its thing
   11307 			 */
   11308 			sc->sc_ctrl |= CTRL_LRST;
   11309 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11310 			CSR_WRITE_FLUSH(sc);
   11311 			delay(1000);
   11312 			sc->sc_ctrl &= ~CTRL_LRST;
   11313 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11314 			CSR_WRITE_FLUSH(sc);
   11315 			delay(1000);
   11316 			CSR_WRITE(sc, WMREG_TXCW,
   11317 			    sc->sc_txcw & ~TXCW_ANE);
   11318 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11319 		}
   11320 	}
   11321 
   11322 setled:
   11323 	wm_tbi_serdes_set_linkled(sc);
   11324 }
   11325 
   11326 /* SERDES related */
   11327 static void
   11328 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11329 {
   11330 	uint32_t reg;
   11331 
   11332 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11333 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11334 		return;
   11335 
   11336 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11337 	reg |= PCS_CFG_PCS_EN;
   11338 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11339 
   11340 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11341 	reg &= ~CTRL_EXT_SWDPIN(3);
   11342 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11343 	CSR_WRITE_FLUSH(sc);
   11344 }
   11345 
   11346 static int
   11347 wm_serdes_mediachange(struct ifnet *ifp)
   11348 {
   11349 	struct wm_softc *sc = ifp->if_softc;
   11350 	bool pcs_autoneg = true; /* XXX */
   11351 	uint32_t ctrl_ext, pcs_lctl, reg;
   11352 
   11353 	/* XXX Currently, this function is not called on 8257[12] */
   11354 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11355 	    || (sc->sc_type >= WM_T_82575))
   11356 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11357 
   11358 	wm_serdes_power_up_link_82575(sc);
   11359 
   11360 	sc->sc_ctrl |= CTRL_SLU;
   11361 
   11362 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11363 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11364 
   11365 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11366 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11367 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11368 	case CTRL_EXT_LINK_MODE_SGMII:
   11369 		pcs_autoneg = true;
   11370 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11371 		break;
   11372 	case CTRL_EXT_LINK_MODE_1000KX:
   11373 		pcs_autoneg = false;
   11374 		/* FALLTHROUGH */
   11375 	default:
   11376 		if ((sc->sc_type == WM_T_82575)
   11377 		    || (sc->sc_type == WM_T_82576)) {
   11378 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11379 				pcs_autoneg = false;
   11380 		}
   11381 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11382 		    | CTRL_FRCFDX;
   11383 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11384 	}
   11385 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11386 
   11387 	if (pcs_autoneg) {
   11388 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11389 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11390 
   11391 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11392 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11393 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11394 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11395 	} else
   11396 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11397 
   11398 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11399 
   11401 	return 0;
   11402 }
   11403 
   11404 static void
   11405 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11406 {
   11407 	struct wm_softc *sc = ifp->if_softc;
   11408 	struct mii_data *mii = &sc->sc_mii;
   11409 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11410 	uint32_t pcs_adv, pcs_lpab, reg;
   11411 
   11412 	ifmr->ifm_status = IFM_AVALID;
   11413 	ifmr->ifm_active = IFM_ETHER;
   11414 
   11415 	/* Check PCS */
   11416 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11417 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11418 		ifmr->ifm_active |= IFM_NONE;
   11419 		sc->sc_tbi_linkup = 0;
   11420 		goto setled;
   11421 	}
   11422 
   11423 	sc->sc_tbi_linkup = 1;
   11424 	ifmr->ifm_status |= IFM_ACTIVE;
   11425 	if (sc->sc_type == WM_T_I354) {
   11426 		uint32_t status;
   11427 
   11428 		status = CSR_READ(sc, WMREG_STATUS);
   11429 		if (((status & STATUS_2P5_SKU) != 0)
   11430 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11431 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11432 		} else
   11433 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11434 	} else {
   11435 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11436 		case PCS_LSTS_SPEED_10:
   11437 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11438 			break;
   11439 		case PCS_LSTS_SPEED_100:
   11440 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11441 			break;
   11442 		case PCS_LSTS_SPEED_1000:
   11443 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11444 			break;
   11445 		default:
   11446 			device_printf(sc->sc_dev, "Unknown speed\n");
   11447 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11448 			break;
   11449 		}
   11450 	}
   11451 	if ((reg & PCS_LSTS_FDX) != 0)
   11452 		ifmr->ifm_active |= IFM_FDX;
   11453 	else
   11454 		ifmr->ifm_active |= IFM_HDX;
   11455 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11456 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11457 		/* Check flow */
   11458 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11459 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11460 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11461 			goto setled;
   11462 		}
   11463 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11464 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11465 		DPRINTF(WM_DEBUG_LINK,
   11466 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11467 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11468 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11469 			mii->mii_media_active |= IFM_FLOW
   11470 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11471 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11472 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11473 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11474 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11475 			mii->mii_media_active |= IFM_FLOW
   11476 			    | IFM_ETH_TXPAUSE;
   11477 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11478 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11479 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11480 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11481 			mii->mii_media_active |= IFM_FLOW
   11482 			    | IFM_ETH_RXPAUSE;
   11483 		}
   11484 	}
   11485 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11486 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11487 setled:
   11488 	wm_tbi_serdes_set_linkled(sc);
   11489 }
   11490 
   11491 /*
   11492  * wm_serdes_tick:
   11493  *
   11494  *	Check the link on serdes devices.
   11495  */
   11496 static void
   11497 wm_serdes_tick(struct wm_softc *sc)
   11498 {
   11499 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11500 	struct mii_data *mii = &sc->sc_mii;
   11501 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11502 	uint32_t reg;
   11503 
   11504 	KASSERT(WM_CORE_LOCKED(sc));
   11505 
   11506 	mii->mii_media_status = IFM_AVALID;
   11507 	mii->mii_media_active = IFM_ETHER;
   11508 
   11509 	/* Check PCS */
   11510 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11511 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11512 		mii->mii_media_status |= IFM_ACTIVE;
   11513 		sc->sc_tbi_linkup = 1;
   11514 		sc->sc_tbi_serdes_ticks = 0;
   11515 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11516 		if ((reg & PCS_LSTS_FDX) != 0)
   11517 			mii->mii_media_active |= IFM_FDX;
   11518 		else
   11519 			mii->mii_media_active |= IFM_HDX;
   11520 	} else {
   11521 		mii->mii_media_status |= IFM_NONE;
   11522 		sc->sc_tbi_linkup = 0;
   11523 		/* If the timer expired, retry autonegotiation */
   11524 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11525 		    && (++sc->sc_tbi_serdes_ticks
   11526 			>= sc->sc_tbi_serdes_anegticks)) {
   11527 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11528 			sc->sc_tbi_serdes_ticks = 0;
   11529 			/* XXX */
   11530 			wm_serdes_mediachange(ifp);
   11531 		}
   11532 	}
   11533 
   11534 	wm_tbi_serdes_set_linkled(sc);
   11535 }
   11536 
   11537 /* SFP related */
   11538 
   11539 static int
   11540 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11541 {
   11542 	uint32_t i2ccmd;
   11543 	int i;
   11544 
   11545 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11546 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11547 
   11548 	/* Poll the ready bit */
   11549 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11550 		delay(50);
   11551 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11552 		if (i2ccmd & I2CCMD_READY)
   11553 			break;
   11554 	}
   11555 	if ((i2ccmd & I2CCMD_READY) == 0)
   11556 		return -1;
   11557 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11558 		return -1;
   11559 
   11560 	*data = i2ccmd & 0x00ff;
   11561 
   11562 	return 0;
   11563 }
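          
          /*
           * A minimal sketch (disabled; not part of the driver) showing how a
           * multi-byte SFP ROM read could be composed from the single-byte
           * helper above.  The name wm_sfp_read_buf is hypothetical.
           */
          #if 0
          static int
          wm_sfp_read_buf(struct wm_softc *sc, uint16_t offset, uint8_t *buf,
              size_t len)
          {
          	size_t i;
          
          	/* One byte per I2CCMD transaction; stop on the first error. */
          	for (i = 0; i < len; i++)
          		if (wm_sfp_read_data_byte(sc, offset + i, &buf[i]) != 0)
          			return -1;
          	return 0;
          }
          #endif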
   11564 
   11565 static uint32_t
   11566 wm_sfp_get_media_type(struct wm_softc *sc)
   11567 {
   11568 	uint32_t ctrl_ext;
   11569 	uint8_t val = 0;
   11570 	int timeout = 3;
   11571 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11572 	int rv = -1;
   11573 
   11574 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11575 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11576 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11577 	CSR_WRITE_FLUSH(sc);
   11578 
   11579 	/* Read SFP module data */
   11580 	while (timeout) {
   11581 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11582 		if (rv == 0)
   11583 			break;
   11584 		delay(100*1000); /* XXX too big */
   11585 		timeout--;
   11586 	}
   11587 	if (rv != 0)
   11588 		goto out;
   11589 	switch (val) {
   11590 	case SFF_SFP_ID_SFF:
   11591 		aprint_normal_dev(sc->sc_dev,
   11592 		    "Module/Connector soldered to board\n");
   11593 		break;
   11594 	case SFF_SFP_ID_SFP:
   11595 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11596 		break;
   11597 	case SFF_SFP_ID_UNKNOWN:
   11598 		goto out;
   11599 	default:
   11600 		break;
   11601 	}
   11602 
   11603 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11604 	if (rv != 0) {
   11605 		goto out;
   11606 	}
   11607 
   11608 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11609 		mediatype = WM_MEDIATYPE_SERDES;
    11610 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11611 		sc->sc_flags |= WM_F_SGMII;
   11612 		mediatype = WM_MEDIATYPE_COPPER;
    11613 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11614 		sc->sc_flags |= WM_F_SGMII;
   11615 		mediatype = WM_MEDIATYPE_SERDES;
   11616 	}
   11617 
   11618 out:
   11619 	/* Restore I2C interface setting */
   11620 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11621 
   11622 	return mediatype;
   11623 }
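          
          /*
           * Example decode (illustrative): a typical 1000BASE-SX module
           * reports identifier 0x03 (SFP) at SFF_SFP_ID_OFF and sets the
           * 1000BASE-SX bit in its Ethernet compliance flags, so the function
           * above returns WM_MEDIATYPE_SERDES without WM_F_SGMII; a
           * 1000BASE-T copper module sets the 1000BASE-T flag instead and is
           * treated as SGMII copper.
           */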
   11624 
   11625 /*
   11626  * NVM related.
   11627  * Microwire, SPI (w/wo EERD) and Flash.
   11628  */
   11629 
   11630 /* Both spi and uwire */
   11631 
   11632 /*
   11633  * wm_eeprom_sendbits:
   11634  *
   11635  *	Send a series of bits to the EEPROM.
   11636  */
   11637 static void
   11638 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11639 {
   11640 	uint32_t reg;
   11641 	int x;
   11642 
   11643 	reg = CSR_READ(sc, WMREG_EECD);
   11644 
   11645 	for (x = nbits; x > 0; x--) {
   11646 		if (bits & (1U << (x - 1)))
   11647 			reg |= EECD_DI;
   11648 		else
   11649 			reg &= ~EECD_DI;
   11650 		CSR_WRITE(sc, WMREG_EECD, reg);
   11651 		CSR_WRITE_FLUSH(sc);
   11652 		delay(2);
   11653 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11654 		CSR_WRITE_FLUSH(sc);
   11655 		delay(2);
   11656 		CSR_WRITE(sc, WMREG_EECD, reg);
   11657 		CSR_WRITE_FLUSH(sc);
   11658 		delay(2);
   11659 	}
   11660 }
   11661 
   11662 /*
   11663  * wm_eeprom_recvbits:
   11664  *
   11665  *	Receive a series of bits from the EEPROM.
   11666  */
   11667 static void
   11668 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11669 {
   11670 	uint32_t reg, val;
   11671 	int x;
   11672 
   11673 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11674 
   11675 	val = 0;
   11676 	for (x = nbits; x > 0; x--) {
   11677 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11678 		CSR_WRITE_FLUSH(sc);
   11679 		delay(2);
   11680 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11681 			val |= (1U << (x - 1));
   11682 		CSR_WRITE(sc, WMREG_EECD, reg);
   11683 		CSR_WRITE_FLUSH(sc);
   11684 		delay(2);
   11685 	}
   11686 	*valp = val;
   11687 }
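          
          /*
           * Both helpers above bit-bang the 3-wire EEPROM bus through EECD:
           * each bit is driven on DI (or sampled from DO) around a raise and
           * lower of the SK clock, with a short delay at every edge.  For
           * example, sending the value 0b101 in 3 bits performs three
           * write/clock rounds with DI = 1, 0, 1, most significant bit first.
           */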
   11688 
   11689 /* Microwire */
   11690 
   11691 /*
   11692  * wm_nvm_read_uwire:
   11693  *
   11694  *	Read a word from the EEPROM using the MicroWire protocol.
   11695  */
   11696 static int
   11697 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11698 {
   11699 	uint32_t reg, val;
   11700 	int i;
   11701 
   11702 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11703 		device_xname(sc->sc_dev), __func__));
   11704 
   11705 	if (sc->nvm.acquire(sc) != 0)
   11706 		return -1;
   11707 
   11708 	for (i = 0; i < wordcnt; i++) {
   11709 		/* Clear SK and DI. */
   11710 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11711 		CSR_WRITE(sc, WMREG_EECD, reg);
   11712 
   11713 		/*
   11714 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11715 		 * and Xen.
   11716 		 *
   11717 		 * We use this workaround only for 82540 because qemu's
    11718 		 * e1000 acts as an 82540.
   11719 		 */
   11720 		if (sc->sc_type == WM_T_82540) {
   11721 			reg |= EECD_SK;
   11722 			CSR_WRITE(sc, WMREG_EECD, reg);
   11723 			reg &= ~EECD_SK;
   11724 			CSR_WRITE(sc, WMREG_EECD, reg);
   11725 			CSR_WRITE_FLUSH(sc);
   11726 			delay(2);
   11727 		}
   11728 		/* XXX: end of workaround */
   11729 
   11730 		/* Set CHIP SELECT. */
   11731 		reg |= EECD_CS;
   11732 		CSR_WRITE(sc, WMREG_EECD, reg);
   11733 		CSR_WRITE_FLUSH(sc);
   11734 		delay(2);
   11735 
   11736 		/* Shift in the READ command. */
   11737 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11738 
   11739 		/* Shift in address. */
   11740 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11741 
   11742 		/* Shift out the data. */
   11743 		wm_eeprom_recvbits(sc, &val, 16);
   11744 		data[i] = val & 0xffff;
   11745 
   11746 		/* Clear CHIP SELECT. */
   11747 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11748 		CSR_WRITE(sc, WMREG_EECD, reg);
   11749 		CSR_WRITE_FLUSH(sc);
   11750 		delay(2);
   11751 	}
   11752 
   11753 	sc->nvm.release(sc);
   11754 	return 0;
   11755 }
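          
          /*
           * Sketch of a typical caller (disabled; the array name is made up).
           * NVM reads always go through wm_nvm_read(), which dispatches to
           * the Microwire/SPI/flash backend via sc->nvm.read:
           */
          #if 0
          	uint16_t myea[3];	/* the three 16-bit MAC address words */
          
          	if (wm_nvm_read(sc, NVM_OFF_MACADDR, __arraycount(myea), myea))
          		aprint_error_dev(sc->sc_dev, "unable to read MAC address\n");
          #endif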
   11756 
   11757 /* SPI */
   11758 
   11759 /*
   11760  * Set SPI and FLASH related information from the EECD register.
   11761  * For 82541 and 82547, the word size is taken from EEPROM.
   11762  */
   11763 static int
   11764 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11765 {
   11766 	int size;
   11767 	uint32_t reg;
   11768 	uint16_t data;
   11769 
   11770 	reg = CSR_READ(sc, WMREG_EECD);
   11771 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11772 
   11773 	/* Read the size of NVM from EECD by default */
   11774 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11775 	switch (sc->sc_type) {
   11776 	case WM_T_82541:
   11777 	case WM_T_82541_2:
   11778 	case WM_T_82547:
   11779 	case WM_T_82547_2:
   11780 		/* Set dummy value to access EEPROM */
   11781 		sc->sc_nvm_wordsize = 64;
   11782 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11783 			aprint_error_dev(sc->sc_dev,
   11784 			    "%s: failed to read EEPROM size\n", __func__);
   11785 		}
   11786 		reg = data;
   11787 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11788 		if (size == 0)
   11789 			size = 6; /* 64 word size */
   11790 		else
   11791 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11792 		break;
   11793 	case WM_T_80003:
   11794 	case WM_T_82571:
   11795 	case WM_T_82572:
   11796 	case WM_T_82573: /* SPI case */
   11797 	case WM_T_82574: /* SPI case */
   11798 	case WM_T_82583: /* SPI case */
   11799 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11800 		if (size > 14)
   11801 			size = 14;
   11802 		break;
   11803 	case WM_T_82575:
   11804 	case WM_T_82576:
   11805 	case WM_T_82580:
   11806 	case WM_T_I350:
   11807 	case WM_T_I354:
   11808 	case WM_T_I210:
   11809 	case WM_T_I211:
   11810 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11811 		if (size > 15)
   11812 			size = 15;
   11813 		break;
   11814 	default:
   11815 		aprint_error_dev(sc->sc_dev,
   11816 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11817 		return -1;
   11819 	}
   11820 
   11821 	sc->sc_nvm_wordsize = 1 << size;
   11822 
   11823 	return 0;
   11824 }
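          
          /*
           * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT == 6 as in
           * if_wmreg.h): an EECD size field of 2 on an 82571 gives
           * size = 2 + 6 = 8, so sc->sc_nvm_wordsize = 1 << 8 = 256 words,
           * i.e. a 512-byte (4-kbit) SPI EEPROM.
           */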
   11825 
   11826 /*
   11827  * wm_nvm_ready_spi:
   11828  *
   11829  *	Wait for a SPI EEPROM to be ready for commands.
   11830  */
   11831 static int
   11832 wm_nvm_ready_spi(struct wm_softc *sc)
   11833 {
   11834 	uint32_t val;
   11835 	int usec;
   11836 
   11837 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11838 		device_xname(sc->sc_dev), __func__));
   11839 
   11840 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11841 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11842 		wm_eeprom_recvbits(sc, &val, 8);
   11843 		if ((val & SPI_SR_RDY) == 0)
   11844 			break;
   11845 	}
   11846 	if (usec >= SPI_MAX_RETRIES) {
    11847 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11848 		return -1;
   11849 	}
   11850 	return 0;
   11851 }
   11852 
   11853 /*
   11854  * wm_nvm_read_spi:
   11855  *
    11856  *	Read a word from the EEPROM using the SPI protocol.
   11857  */
   11858 static int
   11859 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11860 {
   11861 	uint32_t reg, val;
   11862 	int i;
   11863 	uint8_t opc;
   11864 	int rv = 0;
   11865 
   11866 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11867 		device_xname(sc->sc_dev), __func__));
   11868 
   11869 	if (sc->nvm.acquire(sc) != 0)
   11870 		return -1;
   11871 
   11872 	/* Clear SK and CS. */
   11873 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11874 	CSR_WRITE(sc, WMREG_EECD, reg);
   11875 	CSR_WRITE_FLUSH(sc);
   11876 	delay(2);
   11877 
   11878 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11879 		goto out;
   11880 
   11881 	/* Toggle CS to flush commands. */
   11882 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11883 	CSR_WRITE_FLUSH(sc);
   11884 	delay(2);
   11885 	CSR_WRITE(sc, WMREG_EECD, reg);
   11886 	CSR_WRITE_FLUSH(sc);
   11887 	delay(2);
   11888 
   11889 	opc = SPI_OPC_READ;
   11890 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11891 		opc |= SPI_OPC_A8;
   11892 
   11893 	wm_eeprom_sendbits(sc, opc, 8);
   11894 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11895 
   11896 	for (i = 0; i < wordcnt; i++) {
   11897 		wm_eeprom_recvbits(sc, &val, 16);
   11898 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11899 	}
   11900 
   11901 	/* Raise CS and clear SK. */
   11902 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11903 	CSR_WRITE(sc, WMREG_EECD, reg);
   11904 	CSR_WRITE_FLUSH(sc);
   11905 	delay(2);
   11906 
   11907 out:
   11908 	sc->nvm.release(sc);
   11909 	return rv;
   11910 }
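          
          /*
           * The SPI address is a byte address, hence "word << 1" above.  With
           * 8-bit addressing, words 128 and up live past byte address 0xff,
           * so the ninth address bit travels in the opcode as SPI_OPC_A8:
           * e.g. word 0x90 maps to byte address 0x120, which needs that bit.
           */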
   11911 
    11912 /* Reading via the EERD register */
   11913 
   11914 static int
   11915 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11916 {
   11917 	uint32_t attempts = 100000;
   11918 	uint32_t i, reg = 0;
   11919 	int32_t done = -1;
   11920 
   11921 	for (i = 0; i < attempts; i++) {
   11922 		reg = CSR_READ(sc, rw);
   11923 
   11924 		if (reg & EERD_DONE) {
   11925 			done = 0;
   11926 			break;
   11927 		}
   11928 		delay(5);
   11929 	}
   11930 
   11931 	return done;
   11932 }
   11933 
   11934 static int
   11935 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11936     uint16_t *data)
   11937 {
   11938 	int i, eerd = 0;
   11939 	int rv = 0;
   11940 
   11941 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11942 		device_xname(sc->sc_dev), __func__));
   11943 
   11944 	if (sc->nvm.acquire(sc) != 0)
   11945 		return -1;
   11946 
   11947 	for (i = 0; i < wordcnt; i++) {
   11948 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11949 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11950 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11951 		if (rv != 0) {
   11952 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   11953 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   11954 			break;
   11955 		}
   11956 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11957 	}
   11958 
   11959 	sc->nvm.release(sc);
   11960 	return rv;
   11961 }
   11962 
   11963 /* Flash */
   11964 
   11965 static int
   11966 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11967 {
   11968 	uint32_t eecd;
   11969 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11970 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11971 	uint32_t nvm_dword = 0;
   11972 	uint8_t sig_byte = 0;
    11973 	int rv;
   11974 
   11975 	switch (sc->sc_type) {
   11976 	case WM_T_PCH_SPT:
   11977 	case WM_T_PCH_CNP:
   11978 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   11979 		act_offset = ICH_NVM_SIG_WORD * 2;
   11980 
   11981 		/* set bank to 0 in case flash read fails. */
   11982 		*bank = 0;
   11983 
   11984 		/* Check bank 0 */
   11985 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   11986 		if (rv != 0)
   11987 			return rv;
   11988 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   11989 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11990 			*bank = 0;
   11991 			return 0;
   11992 		}
   11993 
   11994 		/* Check bank 1 */
    11995 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    11996 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    11997 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   11998 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11999 			*bank = 1;
   12000 			return 0;
   12001 		}
   12002 		aprint_error_dev(sc->sc_dev,
   12003 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12004 		return -1;
   12005 	case WM_T_ICH8:
   12006 	case WM_T_ICH9:
   12007 		eecd = CSR_READ(sc, WMREG_EECD);
   12008 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12009 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12010 			return 0;
   12011 		}
   12012 		/* FALLTHROUGH */
   12013 	default:
   12014 		/* Default to 0 */
   12015 		*bank = 0;
   12016 
   12017 		/* Check bank 0 */
   12018 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12019 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12020 			*bank = 0;
   12021 			return 0;
   12022 		}
   12023 
   12024 		/* Check bank 1 */
   12025 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12026 		    &sig_byte);
   12027 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12028 			*bank = 1;
   12029 			return 0;
   12030 		}
   12031 	}
   12032 
   12033 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12034 		device_xname(sc->sc_dev)));
   12035 	return -1;
   12036 }
   12037 
   12038 /******************************************************************************
   12039  * This function does initial flash setup so that a new read/write/erase cycle
   12040  * can be started.
   12041  *
   12042  * sc - The pointer to the hw structure
   12043  ****************************************************************************/
   12044 static int32_t
   12045 wm_ich8_cycle_init(struct wm_softc *sc)
   12046 {
   12047 	uint16_t hsfsts;
   12048 	int32_t error = 1;
   12049 	int32_t i     = 0;
   12050 
   12051 	if (sc->sc_type >= WM_T_PCH_SPT)
   12052 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12053 	else
   12054 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12055 
    12056 	/* Maybe check the Flash Descriptor Valid bit in HW status */
    12057 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
    12058 		return error;
   12060 
    12061 	/* Clear FCERR and DAEL in HW status by writing 1s */
   12063 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12064 
   12065 	if (sc->sc_type >= WM_T_PCH_SPT)
   12066 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12067 	else
   12068 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12069 
    12070 	/*
    12071 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12072 	 * check against in order to start a new cycle, or FDONE should be
    12073 	 * set by hardware after reset so that it can be used to tell
    12074 	 * whether a cycle is in progress or has completed.  We should also
    12075 	 * have some software semaphore mechanism to guard FDONE or the
    12076 	 * cycle-in-progress bit so that access by two threads is
    12077 	 * serialized, and so that two threads don't start a cycle at the
    12078 	 * same time.
    12079 	 */
   12080 
   12081 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12082 		/*
   12083 		 * There is no cycle running at present, so we can start a
   12084 		 * cycle
   12085 		 */
   12086 
   12087 		/* Begin by setting Flash Cycle Done. */
   12088 		hsfsts |= HSFSTS_DONE;
   12089 		if (sc->sc_type >= WM_T_PCH_SPT)
   12090 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12091 			    hsfsts & 0xffffUL);
   12092 		else
   12093 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12094 		error = 0;
   12095 	} else {
   12096 		/*
    12097 		 * Otherwise, poll for some time so the current cycle has a
   12098 		 * chance to end before giving up.
   12099 		 */
   12100 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12101 			if (sc->sc_type >= WM_T_PCH_SPT)
   12102 				hsfsts = ICH8_FLASH_READ32(sc,
   12103 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12104 			else
   12105 				hsfsts = ICH8_FLASH_READ16(sc,
   12106 				    ICH_FLASH_HSFSTS);
   12107 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12108 				error = 0;
   12109 				break;
   12110 			}
   12111 			delay(1);
   12112 		}
   12113 		if (error == 0) {
   12114 			/*
    12115 			 * The previous cycle ended in time; now set the
    12116 			 * Flash Cycle Done bit.
   12117 			 */
   12118 			hsfsts |= HSFSTS_DONE;
   12119 			if (sc->sc_type >= WM_T_PCH_SPT)
   12120 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12121 				    hsfsts & 0xffffUL);
   12122 			else
   12123 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12124 				    hsfsts);
   12125 		}
   12126 	}
   12127 	return error;
   12128 }
   12129 
   12130 /******************************************************************************
   12131  * This function starts a flash cycle and waits for its completion
   12132  *
   12133  * sc - The pointer to the hw structure
   12134  ****************************************************************************/
   12135 static int32_t
   12136 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12137 {
   12138 	uint16_t hsflctl;
   12139 	uint16_t hsfsts;
   12140 	int32_t error = 1;
   12141 	uint32_t i = 0;
   12142 
   12143 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12144 	if (sc->sc_type >= WM_T_PCH_SPT)
   12145 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12146 	else
   12147 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12148 	hsflctl |= HSFCTL_GO;
   12149 	if (sc->sc_type >= WM_T_PCH_SPT)
   12150 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12151 		    (uint32_t)hsflctl << 16);
   12152 	else
   12153 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12154 
   12155 	/* Wait till FDONE bit is set to 1 */
   12156 	do {
   12157 		if (sc->sc_type >= WM_T_PCH_SPT)
   12158 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12159 			    & 0xffffUL;
   12160 		else
   12161 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12162 		if (hsfsts & HSFSTS_DONE)
   12163 			break;
   12164 		delay(1);
   12165 		i++;
   12166 	} while (i < timeout);
    12167 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12168 		error = 0;
   12169 
   12170 	return error;
   12171 }
   12172 
   12173 /******************************************************************************
   12174  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12175  *
   12176  * sc - The pointer to the hw structure
   12177  * index - The index of the byte or word to read.
    12178  * size - Size of data to read: 1=byte, 2=word, 4=dword
   12179  * data - Pointer to the word to store the value read.
   12180  *****************************************************************************/
   12181 static int32_t
   12182 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12183     uint32_t size, uint32_t *data)
   12184 {
   12185 	uint16_t hsfsts;
   12186 	uint16_t hsflctl;
   12187 	uint32_t flash_linear_address;
   12188 	uint32_t flash_data = 0;
   12189 	int32_t error = 1;
   12190 	int32_t count = 0;
   12191 
    12192 	if (size < 1 || size > 4 || data == NULL ||
   12193 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12194 		return error;
   12195 
   12196 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12197 	    sc->sc_ich8_flash_base;
   12198 
   12199 	do {
   12200 		delay(1);
   12201 		/* Steps */
   12202 		error = wm_ich8_cycle_init(sc);
   12203 		if (error)
   12204 			break;
   12205 
   12206 		if (sc->sc_type >= WM_T_PCH_SPT)
   12207 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12208 			    >> 16;
   12209 		else
   12210 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12211 		/* BCOUNT is size - 1: 0/1/3 select 1-, 2- or 4-byte access. */
   12212 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12213 		    & HSFCTL_BCOUNT_MASK;
   12214 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12215 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12216 			/*
    12217 			 * On SPT, this register is in LAN memory space,
    12218 			 * not flash, so only 32-bit access is supported.
   12219 			 */
   12220 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12221 			    (uint32_t)hsflctl << 16);
   12222 		} else
   12223 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12224 
   12225 		/*
   12226 		 * Write the last 24 bits of index into Flash Linear address
   12227 		 * field in Flash Address
   12228 		 */
   12229 		/* TODO: TBD maybe check the index against the size of flash */
   12230 
   12231 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12232 
   12233 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12234 
   12235 		/*
    12236 		 * If FCERR is set, clear it and retry the whole sequence
    12237 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read
    12238 		 * out Flash Data0, least significant byte first.
   12240 		 */
   12241 		if (error == 0) {
   12242 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12243 			if (size == 1)
   12244 				*data = (uint8_t)(flash_data & 0x000000FF);
   12245 			else if (size == 2)
   12246 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12247 			else if (size == 4)
   12248 				*data = (uint32_t)flash_data;
   12249 			break;
   12250 		} else {
   12251 			/*
   12252 			 * If we've gotten here, then things are probably
   12253 			 * completely hosed, but if the error condition is
   12254 			 * detected, it won't hurt to give it another try...
   12255 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12256 			 */
   12257 			if (sc->sc_type >= WM_T_PCH_SPT)
   12258 				hsfsts = ICH8_FLASH_READ32(sc,
   12259 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12260 			else
   12261 				hsfsts = ICH8_FLASH_READ16(sc,
   12262 				    ICH_FLASH_HSFSTS);
   12263 
   12264 			if (hsfsts & HSFSTS_ERR) {
   12265 				/* Repeat for some time before giving up. */
   12266 				continue;
   12267 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12268 				break;
   12269 		}
   12270 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12271 
   12272 	return error;
   12273 }
   12274 
   12275 /******************************************************************************
   12276  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12277  *
   12278  * sc - pointer to wm_hw structure
   12279  * index - The index of the byte to read.
   12280  * data - Pointer to a byte to store the value read.
   12281  *****************************************************************************/
   12282 static int32_t
   12283 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12284 {
   12285 	int32_t status;
   12286 	uint32_t word = 0;
   12287 
   12288 	status = wm_read_ich8_data(sc, index, 1, &word);
   12289 	if (status == 0)
   12290 		*data = (uint8_t)word;
   12291 	else
   12292 		*data = 0;
   12293 
   12294 	return status;
   12295 }
   12296 
   12297 /******************************************************************************
   12298  * Reads a word from the NVM using the ICH8 flash access registers.
   12299  *
   12300  * sc - pointer to wm_hw structure
   12301  * index - The starting byte index of the word to read.
   12302  * data - Pointer to a word to store the value read.
   12303  *****************************************************************************/
   12304 static int32_t
   12305 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12306 {
   12307 	int32_t status;
   12308 	uint32_t word = 0;
   12309 
   12310 	status = wm_read_ich8_data(sc, index, 2, &word);
   12311 	if (status == 0)
   12312 		*data = (uint16_t)word;
   12313 	else
   12314 		*data = 0;
   12315 
   12316 	return status;
   12317 }
   12318 
   12319 /******************************************************************************
   12320  * Reads a dword from the NVM using the ICH8 flash access registers.
   12321  *
   12322  * sc - pointer to wm_hw structure
   12323  * index - The starting byte index of the word to read.
   12324  * data - Pointer to a word to store the value read.
   12325  *****************************************************************************/
   12326 static int32_t
   12327 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12328 {
   12329 	int32_t status;
   12330 
   12331 	status = wm_read_ich8_data(sc, index, 4, data);
   12332 	return status;
   12333 }
   12334 
   12335 /******************************************************************************
   12336  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12337  * register.
   12338  *
   12339  * sc - Struct containing variables accessed by shared code
   12340  * offset - offset of word in the EEPROM to read
   12341  * data - word read from the EEPROM
   12342  * words - number of words to read
   12343  *****************************************************************************/
   12344 static int
   12345 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12346 {
   12347 	int32_t  rv = 0;
   12348 	uint32_t flash_bank = 0;
   12349 	uint32_t act_offset = 0;
   12350 	uint32_t bank_offset = 0;
   12351 	uint16_t word = 0;
   12352 	uint16_t i = 0;
   12353 
   12354 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12355 		device_xname(sc->sc_dev), __func__));
   12356 
   12357 	if (sc->nvm.acquire(sc) != 0)
   12358 		return -1;
   12359 
   12360 	/*
   12361 	 * We need to know which is the valid flash bank.  In the event
   12362 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12363 	 * managing flash_bank.  So it cannot be trusted and needs
   12364 	 * to be updated with each read.
   12365 	 */
   12366 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12367 	if (rv) {
   12368 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12369 			device_xname(sc->sc_dev)));
   12370 		flash_bank = 0;
   12371 	}
   12372 
   12373 	/*
   12374 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12375 	 * size
   12376 	 */
   12377 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12378 
   12379 	for (i = 0; i < words; i++) {
   12380 		/* The NVM part needs a byte offset, hence * 2 */
   12381 		act_offset = bank_offset + ((offset + i) * 2);
   12382 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12383 		if (rv) {
   12384 			aprint_error_dev(sc->sc_dev,
   12385 			    "%s: failed to read NVM\n", __func__);
   12386 			break;
   12387 		}
   12388 		data[i] = word;
   12389 	}
   12390 
   12391 	sc->nvm.release(sc);
   12392 	return rv;
   12393 }
   12394 
   12395 /******************************************************************************
   12396  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12397  * register.
   12398  *
   12399  * sc - Struct containing variables accessed by shared code
   12400  * offset - offset of word in the EEPROM to read
   12401  * data - word read from the EEPROM
   12402  * words - number of words to read
   12403  *****************************************************************************/
   12404 static int
   12405 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12406 {
   12407 	int32_t  rv = 0;
   12408 	uint32_t flash_bank = 0;
   12409 	uint32_t act_offset = 0;
   12410 	uint32_t bank_offset = 0;
   12411 	uint32_t dword = 0;
   12412 	uint16_t i = 0;
   12413 
   12414 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12415 		device_xname(sc->sc_dev), __func__));
   12416 
   12417 	if (sc->nvm.acquire(sc) != 0)
   12418 		return -1;
   12419 
   12420 	/*
   12421 	 * We need to know which is the valid flash bank.  In the event
   12422 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12423 	 * managing flash_bank.  So it cannot be trusted and needs
   12424 	 * to be updated with each read.
   12425 	 */
   12426 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12427 	if (rv) {
   12428 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12429 			device_xname(sc->sc_dev)));
   12430 		flash_bank = 0;
   12431 	}
   12432 
   12433 	/*
   12434 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12435 	 * size
   12436 	 */
   12437 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12438 
   12439 	for (i = 0; i < words; i++) {
   12440 		/* The NVM part needs a byte offset, hence * 2 */
   12441 		act_offset = bank_offset + ((offset + i) * 2);
   12442 		/* but we must read dword aligned, so mask ... */
   12443 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12444 		if (rv) {
   12445 			aprint_error_dev(sc->sc_dev,
   12446 			    "%s: failed to read NVM\n", __func__);
   12447 			break;
   12448 		}
   12449 		/* ... and pick out low or high word */
   12450 		if ((act_offset & 0x2) == 0)
   12451 			data[i] = (uint16_t)(dword & 0xFFFF);
   12452 		else
   12453 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12454 	}
   12455 
   12456 	sc->nvm.release(sc);
   12457 	return rv;
   12458 }
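          
          /*
           * Worked example of the dword-aligned access above: word offset 7
           * in bank 0 gives act_offset = 14; the dword is fetched from byte
           * offset 12 (14 & ~3), and since bit 1 of act_offset is set, the
           * upper 16 bits of that dword hold the requested word.
           */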
   12459 
   12460 /* iNVM */
   12461 
   12462 static int
   12463 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12464 {
    12465 	int32_t  rv = -1;
   12466 	uint32_t invm_dword;
   12467 	uint16_t i;
   12468 	uint8_t record_type, word_address;
   12469 
   12470 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12471 		device_xname(sc->sc_dev), __func__));
   12472 
   12473 	for (i = 0; i < INVM_SIZE; i++) {
   12474 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12475 		/* Get record type */
   12476 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12477 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12478 			break;
   12479 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12480 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12481 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12482 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12483 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12484 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12485 			if (word_address == address) {
   12486 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12487 				rv = 0;
   12488 				break;
   12489 			}
   12490 		}
   12491 	}
   12492 
   12493 	return rv;
   12494 }
   12495 
   12496 static int
   12497 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12498 {
   12499 	int rv = 0;
   12500 	int i;
   12501 
   12502 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12503 		device_xname(sc->sc_dev), __func__));
   12504 
   12505 	if (sc->nvm.acquire(sc) != 0)
   12506 		return -1;
   12507 
   12508 	for (i = 0; i < words; i++) {
   12509 		switch (offset + i) {
   12510 		case NVM_OFF_MACADDR:
   12511 		case NVM_OFF_MACADDR1:
   12512 		case NVM_OFF_MACADDR2:
   12513 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12514 			if (rv != 0) {
   12515 				data[i] = 0xffff;
   12516 				rv = -1;
   12517 			}
   12518 			break;
   12519 		case NVM_OFF_CFG2:
   12520 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12521 			if (rv != 0) {
   12522 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12523 				rv = 0;
   12524 			}
   12525 			break;
   12526 		case NVM_OFF_CFG4:
   12527 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12528 			if (rv != 0) {
   12529 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12530 				rv = 0;
   12531 			}
   12532 			break;
   12533 		case NVM_OFF_LED_1_CFG:
   12534 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12535 			if (rv != 0) {
   12536 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12537 				rv = 0;
   12538 			}
   12539 			break;
   12540 		case NVM_OFF_LED_0_2_CFG:
   12541 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12542 			if (rv != 0) {
   12543 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12544 				rv = 0;
   12545 			}
   12546 			break;
   12547 		case NVM_OFF_ID_LED_SETTINGS:
   12548 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12549 			if (rv != 0) {
   12550 				*data = ID_LED_RESERVED_FFFF;
   12551 				rv = 0;
   12552 			}
   12553 			break;
   12554 		default:
   12555 			DPRINTF(WM_DEBUG_NVM,
   12556 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12557 			*data = NVM_RESERVED_WORD;
   12558 			break;
   12559 		}
   12560 	}
   12561 
   12562 	sc->nvm.release(sc);
   12563 	return rv;
   12564 }
   12565 
    12566 /* Locking, NVM type detection, checksum validation, version and read */
   12567 
   12568 static int
   12569 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12570 {
   12571 	uint32_t eecd = 0;
   12572 
   12573 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12574 	    || sc->sc_type == WM_T_82583) {
   12575 		eecd = CSR_READ(sc, WMREG_EECD);
   12576 
   12577 		/* Isolate bits 15 & 16 */
   12578 		eecd = ((eecd >> 15) & 0x03);
   12579 
   12580 		/* If both bits are set, device is Flash type */
   12581 		if (eecd == 0x03)
   12582 			return 0;
   12583 	}
   12584 	return 1;
   12585 }
   12586 
   12587 static int
   12588 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12589 {
   12590 	uint32_t eec;
   12591 
   12592 	eec = CSR_READ(sc, WMREG_EEC);
   12593 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12594 		return 1;
   12595 
   12596 	return 0;
   12597 }
   12598 
   12599 /*
   12600  * wm_nvm_validate_checksum
   12601  *
   12602  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12603  */
   12604 static int
   12605 wm_nvm_validate_checksum(struct wm_softc *sc)
   12606 {
   12607 	uint16_t checksum;
   12608 	uint16_t eeprom_data;
   12609 #ifdef WM_DEBUG
   12610 	uint16_t csum_wordaddr, valid_checksum;
   12611 #endif
   12612 	int i;
   12613 
   12614 	checksum = 0;
   12615 
   12616 	/* Don't check for I211 */
   12617 	if (sc->sc_type == WM_T_I211)
   12618 		return 0;
   12619 
   12620 #ifdef WM_DEBUG
   12621 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12622 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12623 		csum_wordaddr = NVM_OFF_COMPAT;
   12624 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12625 	} else {
   12626 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12627 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12628 	}
   12629 
   12630 	/* Dump EEPROM image for debug */
   12631 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12632 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12633 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12634 		/* XXX PCH_SPT? */
   12635 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12636 		if ((eeprom_data & valid_checksum) == 0) {
   12637 			DPRINTF(WM_DEBUG_NVM,
   12638 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12639 				device_xname(sc->sc_dev), eeprom_data,
   12640 				    valid_checksum));
   12641 		}
   12642 	}
   12643 
   12644 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12645 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12646 		for (i = 0; i < NVM_SIZE; i++) {
   12647 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12648 				printf("XXXX ");
   12649 			else
   12650 				printf("%04hx ", eeprom_data);
   12651 			if (i % 8 == 7)
   12652 				printf("\n");
   12653 		}
   12654 	}
   12655 
   12656 #endif /* WM_DEBUG */
   12657 
   12658 	for (i = 0; i < NVM_SIZE; i++) {
   12659 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12660 			return 1;
   12661 		checksum += eeprom_data;
   12662 	}
   12663 
   12664 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12665 #ifdef WM_DEBUG
   12666 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12667 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12668 #endif
   12669 	}
   12670 
   12671 	return 0;
   12672 }
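          
          /*
           * The sum of words 0x00-0x3f must equal NVM_CHECKSUM (0xbaba);
           * word 0x3f is programmed so the 16-bit sum wraps to that constant.
           * Note the function above only warns on a mismatch instead of
           * failing, since shipped NVM images with bad checksums exist.
           * A minimal sketch of the same arithmetic (disabled, fragment):
           */
          #if 0
          	uint16_t sum = 0, word;
          	int off;
          
          	for (off = 0; off < NVM_SIZE; off++) {
          		if (wm_nvm_read(sc, off, 1, &word) != 0)
          			break;
          		sum += word;	/* 16-bit wraparound is intended */
          	}
          	/* sum == NVM_CHECKSUM (0xbaba) for a consistent image */
          #endif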
   12673 
   12674 static void
   12675 wm_nvm_version_invm(struct wm_softc *sc)
   12676 {
   12677 	uint32_t dword;
   12678 
   12679 	/*
    12680 	 * Linux's code to decode the version is very strange, so we don't
    12681 	 * follow that algorithm and just use word 61 as the document
    12682 	 * describes.  Perhaps it's not perfect though...
   12683 	 *
   12684 	 * Example:
   12685 	 *
   12686 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12687 	 */
   12688 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12689 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12690 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12691 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12692 }
   12693 
   12694 static void
   12695 wm_nvm_version(struct wm_softc *sc)
   12696 {
   12697 	uint16_t major, minor, build, patch;
   12698 	uint16_t uid0, uid1;
   12699 	uint16_t nvm_data;
   12700 	uint16_t off;
   12701 	bool check_version = false;
   12702 	bool check_optionrom = false;
   12703 	bool have_build = false;
   12704 	bool have_uid = true;
   12705 
   12706 	/*
   12707 	 * Version format:
   12708 	 *
   12709 	 * XYYZ
   12710 	 * X0YZ
   12711 	 * X0YY
   12712 	 *
   12713 	 * Example:
   12714 	 *
   12715 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12716 	 *	82571	0x50a6	5.10.6?
   12717 	 *	82572	0x506a	5.6.10?
   12718 	 *	82572EI	0x5069	5.6.9?
   12719 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12720 	 *		0x2013	2.1.3?
    12721 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12722 	 */
   12723 
   12724 	/*
   12725 	 * XXX
   12726 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    12727 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12728 	 */
   12729 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12730 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12731 		have_uid = false;
   12732 
   12733 	switch (sc->sc_type) {
   12734 	case WM_T_82571:
   12735 	case WM_T_82572:
   12736 	case WM_T_82574:
   12737 	case WM_T_82583:
   12738 		check_version = true;
   12739 		check_optionrom = true;
   12740 		have_build = true;
   12741 		break;
   12742 	case WM_T_82575:
   12743 	case WM_T_82576:
   12744 	case WM_T_82580:
   12745 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12746 			check_version = true;
   12747 		break;
   12748 	case WM_T_I211:
   12749 		wm_nvm_version_invm(sc);
   12750 		have_uid = false;
   12751 		goto printver;
   12752 	case WM_T_I210:
   12753 		if (!wm_nvm_flash_presence_i210(sc)) {
   12754 			wm_nvm_version_invm(sc);
   12755 			have_uid = false;
   12756 			goto printver;
   12757 		}
   12758 		/* FALLTHROUGH */
   12759 	case WM_T_I350:
   12760 	case WM_T_I354:
   12761 		check_version = true;
   12762 		check_optionrom = true;
   12763 		break;
   12764 	default:
   12765 		return;
   12766 	}
   12767 	if (check_version
   12768 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12769 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12770 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12771 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12772 			build = nvm_data & NVM_BUILD_MASK;
   12773 			have_build = true;
   12774 		} else
   12775 			minor = nvm_data & 0x00ff;
   12776 
   12777 		/* Decimal */
   12778 		minor = (minor / 16) * 10 + (minor % 16);
   12779 		sc->sc_nvm_ver_major = major;
   12780 		sc->sc_nvm_ver_minor = minor;
   12781 
   12782 printver:
   12783 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12784 		    sc->sc_nvm_ver_minor);
   12785 		if (have_build) {
   12786 			sc->sc_nvm_ver_build = build;
   12787 			aprint_verbose(".%d", build);
   12788 		}
   12789 	}
   12790 
    12791 	/* Assume the Option ROM area is above NVM_SIZE */
   12792 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12793 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12794 		/* Option ROM Version */
   12795 		if ((off != 0x0000) && (off != 0xffff)) {
   12796 			int rv;
   12797 
   12798 			off += NVM_COMBO_VER_OFF;
   12799 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12800 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12801 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12802 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12803 				/* 16bits */
   12804 				major = uid0 >> 8;
   12805 				build = (uid0 << 8) | (uid1 >> 8);
   12806 				patch = uid1 & 0x00ff;
   12807 				aprint_verbose(", option ROM Version %d.%d.%d",
   12808 				    major, build, patch);
   12809 			}
   12810 		}
   12811 	}
   12812 
   12813 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12814 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12815 }
   12816 
   12817 /*
   12818  * wm_nvm_read:
   12819  *
   12820  *	Read data from the serial EEPROM.
   12821  */
   12822 static int
   12823 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12824 {
   12825 	int rv;
   12826 
   12827 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12828 		device_xname(sc->sc_dev), __func__));
   12829 
   12830 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12831 		return -1;
   12832 
   12833 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12834 
   12835 	return rv;
   12836 }
   12837 
   12838 /*
   12839  * Hardware semaphores.
    12840  * Very complex...
   12841  */
   12842 
   12843 static int
   12844 wm_get_null(struct wm_softc *sc)
   12845 {
   12846 
   12847 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12848 		device_xname(sc->sc_dev), __func__));
   12849 	return 0;
   12850 }
   12851 
   12852 static void
   12853 wm_put_null(struct wm_softc *sc)
   12854 {
   12855 
   12856 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12857 		device_xname(sc->sc_dev), __func__));
   12858 	return;
   12859 }
   12860 
   12861 static int
   12862 wm_get_eecd(struct wm_softc *sc)
   12863 {
   12864 	uint32_t reg;
   12865 	int x;
   12866 
   12867 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12868 		device_xname(sc->sc_dev), __func__));
   12869 
   12870 	reg = CSR_READ(sc, WMREG_EECD);
   12871 
   12872 	/* Request EEPROM access. */
   12873 	reg |= EECD_EE_REQ;
   12874 	CSR_WRITE(sc, WMREG_EECD, reg);
   12875 
   12876 	/* ..and wait for it to be granted. */
   12877 	for (x = 0; x < 1000; x++) {
   12878 		reg = CSR_READ(sc, WMREG_EECD);
   12879 		if (reg & EECD_EE_GNT)
   12880 			break;
   12881 		delay(5);
   12882 	}
   12883 	if ((reg & EECD_EE_GNT) == 0) {
   12884 		aprint_error_dev(sc->sc_dev,
   12885 		    "could not acquire EEPROM GNT\n");
   12886 		reg &= ~EECD_EE_REQ;
   12887 		CSR_WRITE(sc, WMREG_EECD, reg);
   12888 		return -1;
   12889 	}
   12890 
   12891 	return 0;
   12892 }
   12893 
   12894 static void
   12895 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12896 {
   12897 
   12898 	*eecd |= EECD_SK;
   12899 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12900 	CSR_WRITE_FLUSH(sc);
   12901 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12902 		delay(1);
   12903 	else
   12904 		delay(50);
   12905 }
   12906 
   12907 static void
   12908 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12909 {
   12910 
   12911 	*eecd &= ~EECD_SK;
   12912 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12913 	CSR_WRITE_FLUSH(sc);
   12914 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12915 		delay(1);
   12916 	else
   12917 		delay(50);
   12918 }
   12919 
   12920 static void
   12921 wm_put_eecd(struct wm_softc *sc)
   12922 {
   12923 	uint32_t reg;
   12924 
   12925 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12926 		device_xname(sc->sc_dev), __func__));
   12927 
   12928 	/* Stop nvm */
   12929 	reg = CSR_READ(sc, WMREG_EECD);
   12930 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12931 		/* Pull CS high */
   12932 		reg |= EECD_CS;
   12933 		wm_nvm_eec_clock_lower(sc, &reg);
   12934 	} else {
   12935 		/* CS on Microwire is active-high */
   12936 		reg &= ~(EECD_CS | EECD_DI);
   12937 		CSR_WRITE(sc, WMREG_EECD, reg);
   12938 		wm_nvm_eec_clock_raise(sc, &reg);
   12939 		wm_nvm_eec_clock_lower(sc, &reg);
   12940 	}
   12941 
   12942 	reg = CSR_READ(sc, WMREG_EECD);
   12943 	reg &= ~EECD_EE_REQ;
   12944 	CSR_WRITE(sc, WMREG_EECD, reg);
   12945 
   12946 	return;
   12947 }
   12948 
   12949 /*
   12950  * Get hardware semaphore.
   12951  * Same as e1000_get_hw_semaphore_generic()
   12952  */
   12953 static int
   12954 wm_get_swsm_semaphore(struct wm_softc *sc)
   12955 {
   12956 	int32_t timeout;
   12957 	uint32_t swsm;
   12958 
   12959 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12960 		device_xname(sc->sc_dev), __func__));
   12961 	KASSERT(sc->sc_nvm_wordsize > 0);
   12962 
   12963 retry:
   12964 	/* Get the SW semaphore. */
   12965 	timeout = sc->sc_nvm_wordsize + 1;
   12966 	while (timeout) {
   12967 		swsm = CSR_READ(sc, WMREG_SWSM);
   12968 
   12969 		if ((swsm & SWSM_SMBI) == 0)
   12970 			break;
   12971 
   12972 		delay(50);
   12973 		timeout--;
   12974 	}
   12975 
   12976 	if (timeout == 0) {
   12977 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12978 			/*
   12979 			 * In rare circumstances, the SW semaphore may already
   12980 			 * be held unintentionally. Clear the semaphore once
   12981 			 * before giving up.
   12982 			 */
   12983 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   12984 			wm_put_swsm_semaphore(sc);
   12985 			goto retry;
   12986 		}
   12987 		aprint_error_dev(sc->sc_dev,
   12988 		    "could not acquire SWSM SMBI\n");
   12989 		return 1;
   12990 	}
   12991 
   12992 	/* Get the FW semaphore. */
   12993 	timeout = sc->sc_nvm_wordsize + 1;
   12994 	while (timeout) {
   12995 		swsm = CSR_READ(sc, WMREG_SWSM);
   12996 		swsm |= SWSM_SWESMBI;
   12997 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12998 		/* If we managed to set the bit we got the semaphore. */
   12999 		swsm = CSR_READ(sc, WMREG_SWSM);
   13000 		if (swsm & SWSM_SWESMBI)
   13001 			break;
   13002 
   13003 		delay(50);
   13004 		timeout--;
   13005 	}
   13006 
   13007 	if (timeout == 0) {
   13008 		aprint_error_dev(sc->sc_dev,
   13009 		    "could not acquire SWSM SWESMBI\n");
   13010 		/* Release semaphores */
   13011 		wm_put_swsm_semaphore(sc);
   13012 		return 1;
   13013 	}
   13014 	return 0;
   13015 }
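          /*
           * A minimal usage sketch (hypothetical caller): the two-stage
           * SMBI/SWESMBI handshake above must always be paired with a
           * release, even on error paths.
           *
           *	if (wm_get_swsm_semaphore(sc) != 0)
           *		return;
           *	... access the shared resource ...
           *	wm_put_swsm_semaphore(sc);
           */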
   13016 
   13017 /*
   13018  * Put hardware semaphore.
   13019  * Same as e1000_put_hw_semaphore_generic()
   13020  */
   13021 static void
   13022 wm_put_swsm_semaphore(struct wm_softc *sc)
   13023 {
   13024 	uint32_t swsm;
   13025 
   13026 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13027 		device_xname(sc->sc_dev), __func__));
   13028 
   13029 	swsm = CSR_READ(sc, WMREG_SWSM);
   13030 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13031 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13032 }
   13033 
   13034 /*
   13035  * Get SW/FW semaphore.
   13036  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13037  */
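          /*
           * The 16-bit resource mask appears twice in SW_FW_SYNC: shifted by
           * SWFW_SOFT_SHIFT for the driver-owned bits and by SWFW_FIRM_SHIFT
           * for the firmware-owned bits.  Acquisition succeeds only when
           * neither the driver nor the firmware currently owns the resource.
           */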
   13038 static int
   13039 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13040 {
   13041 	uint32_t swfw_sync;
   13042 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13043 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13044 	int timeout;
   13045 
   13046 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13047 		device_xname(sc->sc_dev), __func__));
   13048 
    13049 	if (sc->sc_type == WM_T_80003)
    13050 		timeout = 50;
    13051 	else
    13052 		timeout = 200;
    13053 
    13054 	while (timeout--) {
   13055 		if (wm_get_swsm_semaphore(sc)) {
   13056 			aprint_error_dev(sc->sc_dev,
   13057 			    "%s: failed to get semaphore\n",
   13058 			    __func__);
   13059 			return 1;
   13060 		}
   13061 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13062 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13063 			swfw_sync |= swmask;
   13064 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13065 			wm_put_swsm_semaphore(sc);
   13066 			return 0;
   13067 		}
   13068 		wm_put_swsm_semaphore(sc);
   13069 		delay(5000);
   13070 	}
   13071 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13072 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13073 	return 1;
   13074 }
   13075 
   13076 static void
   13077 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13078 {
   13079 	uint32_t swfw_sync;
   13080 
   13081 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13082 		device_xname(sc->sc_dev), __func__));
   13083 
   13084 	while (wm_get_swsm_semaphore(sc) != 0)
   13085 		continue;
   13086 
   13087 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13088 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13089 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13090 
   13091 	wm_put_swsm_semaphore(sc);
   13092 }
   13093 
   13094 static int
   13095 wm_get_nvm_80003(struct wm_softc *sc)
   13096 {
   13097 	int rv;
   13098 
   13099 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13100 		device_xname(sc->sc_dev), __func__));
   13101 
   13102 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13103 		aprint_error_dev(sc->sc_dev,
   13104 		    "%s: failed to get semaphore(SWFW)\n",
   13105 		    __func__);
   13106 		return rv;
   13107 	}
   13108 
   13109 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13110 	    && (rv = wm_get_eecd(sc)) != 0) {
   13111 		aprint_error_dev(sc->sc_dev,
   13112 		    "%s: failed to get semaphore(EECD)\n",
   13113 		    __func__);
   13114 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13115 		return rv;
   13116 	}
   13117 
   13118 	return 0;
   13119 }
   13120 
   13121 static void
   13122 wm_put_nvm_80003(struct wm_softc *sc)
   13123 {
   13124 
   13125 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13126 		device_xname(sc->sc_dev), __func__));
   13127 
   13128 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13129 		wm_put_eecd(sc);
   13130 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13131 }
   13132 
   13133 static int
   13134 wm_get_nvm_82571(struct wm_softc *sc)
   13135 {
   13136 	int rv;
   13137 
   13138 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13139 		device_xname(sc->sc_dev), __func__));
   13140 
   13141 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13142 		return rv;
   13143 
   13144 	switch (sc->sc_type) {
   13145 	case WM_T_82573:
   13146 		break;
   13147 	default:
   13148 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13149 			rv = wm_get_eecd(sc);
   13150 		break;
   13151 	}
   13152 
   13153 	if (rv != 0) {
   13154 		aprint_error_dev(sc->sc_dev,
   13155 		    "%s: failed to get semaphore\n",
   13156 		    __func__);
   13157 		wm_put_swsm_semaphore(sc);
   13158 	}
   13159 
   13160 	return rv;
   13161 }
   13162 
   13163 static void
   13164 wm_put_nvm_82571(struct wm_softc *sc)
   13165 {
   13166 
   13167 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13168 		device_xname(sc->sc_dev), __func__));
   13169 
   13170 	switch (sc->sc_type) {
   13171 	case WM_T_82573:
   13172 		break;
   13173 	default:
   13174 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13175 			wm_put_eecd(sc);
   13176 		break;
   13177 	}
   13178 
   13179 	wm_put_swsm_semaphore(sc);
   13180 }
   13181 
   13182 static int
   13183 wm_get_phy_82575(struct wm_softc *sc)
   13184 {
   13185 
   13186 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13187 		device_xname(sc->sc_dev), __func__));
   13188 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13189 }
   13190 
   13191 static void
   13192 wm_put_phy_82575(struct wm_softc *sc)
   13193 {
   13194 
   13195 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13196 		device_xname(sc->sc_dev), __func__));
   13197 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13198 }
   13199 
   13200 static int
   13201 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13202 {
   13203 	uint32_t ext_ctrl;
    13204 	int timeout;
   13205 
   13206 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13207 		device_xname(sc->sc_dev), __func__));
   13208 
   13209 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13210 	for (timeout = 0; timeout < 200; timeout++) {
   13211 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13212 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13213 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13214 
   13215 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13216 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13217 			return 0;
   13218 		delay(5000);
   13219 	}
   13220 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13221 	    device_xname(sc->sc_dev), ext_ctrl);
   13222 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13223 	return 1;
   13224 }
   13225 
   13226 static void
   13227 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13228 {
   13229 	uint32_t ext_ctrl;
   13230 
   13231 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13232 		device_xname(sc->sc_dev), __func__));
   13233 
   13234 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13235 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13236 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13237 
   13238 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13239 }
   13240 
   13241 static int
   13242 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13243 {
   13244 	uint32_t ext_ctrl;
   13245 	int timeout;
   13246 
   13247 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13248 		device_xname(sc->sc_dev), __func__));
   13249 	mutex_enter(sc->sc_ich_phymtx);
   13250 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13251 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13252 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13253 			break;
   13254 		delay(1000);
   13255 	}
   13256 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13257 		printf("%s: SW has already locked the resource\n",
   13258 		    device_xname(sc->sc_dev));
   13259 		goto out;
   13260 	}
   13261 
   13262 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13263 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13264 	for (timeout = 0; timeout < 1000; timeout++) {
   13265 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13266 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13267 			break;
   13268 		delay(1000);
   13269 	}
   13270 	if (timeout >= 1000) {
   13271 		printf("%s: failed to acquire semaphore\n",
   13272 		    device_xname(sc->sc_dev));
   13273 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13274 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13275 		goto out;
   13276 	}
   13277 	return 0;
   13278 
   13279 out:
   13280 	mutex_exit(sc->sc_ich_phymtx);
   13281 	return 1;
   13282 }
   13283 
   13284 static void
   13285 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13286 {
   13287 	uint32_t ext_ctrl;
   13288 
   13289 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13290 		device_xname(sc->sc_dev), __func__));
   13291 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13292 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13293 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13294 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13295 	} else {
   13296 		printf("%s: Semaphore unexpectedly released\n",
   13297 		    device_xname(sc->sc_dev));
   13298 	}
   13299 
   13300 	mutex_exit(sc->sc_ich_phymtx);
   13301 }
   13302 
   13303 static int
   13304 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13305 {
   13306 
   13307 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13308 		device_xname(sc->sc_dev), __func__));
   13309 	mutex_enter(sc->sc_ich_nvmmtx);
   13310 
   13311 	return 0;
   13312 }
   13313 
   13314 static void
   13315 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13316 {
   13317 
   13318 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13319 		device_xname(sc->sc_dev), __func__));
   13320 	mutex_exit(sc->sc_ich_nvmmtx);
   13321 }
   13322 
   13323 static int
   13324 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13325 {
   13326 	int i = 0;
   13327 	uint32_t reg;
   13328 
   13329 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13330 		device_xname(sc->sc_dev), __func__));
   13331 
   13332 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13333 	do {
   13334 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13335 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13336 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13337 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13338 			break;
   13339 		delay(2*1000);
   13340 		i++;
   13341 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13342 
   13343 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13344 		wm_put_hw_semaphore_82573(sc);
   13345 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13346 		    device_xname(sc->sc_dev));
   13347 		return -1;
   13348 	}
   13349 
   13350 	return 0;
   13351 }
   13352 
   13353 static void
   13354 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13355 {
   13356 	uint32_t reg;
   13357 
   13358 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13359 		device_xname(sc->sc_dev), __func__));
   13360 
   13361 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13362 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13363 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13364 }
   13365 
   13366 /*
   13367  * Management mode and power management related subroutines.
   13368  * BMC, AMT, suspend/resume and EEE.
   13369  */
   13370 
   13371 #ifdef WM_WOL
   13372 static int
   13373 wm_check_mng_mode(struct wm_softc *sc)
   13374 {
   13375 	int rv;
   13376 
   13377 	switch (sc->sc_type) {
   13378 	case WM_T_ICH8:
   13379 	case WM_T_ICH9:
   13380 	case WM_T_ICH10:
   13381 	case WM_T_PCH:
   13382 	case WM_T_PCH2:
   13383 	case WM_T_PCH_LPT:
   13384 	case WM_T_PCH_SPT:
   13385 	case WM_T_PCH_CNP:
   13386 		rv = wm_check_mng_mode_ich8lan(sc);
   13387 		break;
   13388 	case WM_T_82574:
   13389 	case WM_T_82583:
   13390 		rv = wm_check_mng_mode_82574(sc);
   13391 		break;
   13392 	case WM_T_82571:
   13393 	case WM_T_82572:
   13394 	case WM_T_82573:
   13395 	case WM_T_80003:
   13396 		rv = wm_check_mng_mode_generic(sc);
   13397 		break;
   13398 	default:
    13399 		/* nothing to do */
   13400 		rv = 0;
   13401 		break;
   13402 	}
   13403 
   13404 	return rv;
   13405 }
   13406 
   13407 static int
   13408 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13409 {
   13410 	uint32_t fwsm;
   13411 
   13412 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13413 
   13414 	if (((fwsm & FWSM_FW_VALID) != 0)
   13415 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13416 		return 1;
   13417 
   13418 	return 0;
   13419 }
   13420 
   13421 static int
   13422 wm_check_mng_mode_82574(struct wm_softc *sc)
   13423 {
   13424 	uint16_t data;
   13425 
   13426 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13427 
   13428 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13429 		return 1;
   13430 
   13431 	return 0;
   13432 }
   13433 
   13434 static int
   13435 wm_check_mng_mode_generic(struct wm_softc *sc)
   13436 {
   13437 	uint32_t fwsm;
   13438 
   13439 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13440 
   13441 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13442 		return 1;
   13443 
   13444 	return 0;
   13445 }
   13446 #endif /* WM_WOL */
   13447 
   13448 static int
   13449 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13450 {
   13451 	uint32_t manc, fwsm, factps;
   13452 
   13453 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13454 		return 0;
   13455 
   13456 	manc = CSR_READ(sc, WMREG_MANC);
   13457 
   13458 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13459 		device_xname(sc->sc_dev), manc));
   13460 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13461 		return 0;
   13462 
   13463 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13464 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13465 		factps = CSR_READ(sc, WMREG_FACTPS);
   13466 		if (((factps & FACTPS_MNGCG) == 0)
   13467 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13468 			return 1;
    13469 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13470 		uint16_t data;
   13471 
   13472 		factps = CSR_READ(sc, WMREG_FACTPS);
   13473 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13474 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13475 			device_xname(sc->sc_dev), factps, data));
   13476 		if (((factps & FACTPS_MNGCG) == 0)
   13477 		    && ((data & NVM_CFG2_MNGM_MASK)
   13478 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13479 			return 1;
   13480 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13481 	    && ((manc & MANC_ASF_EN) == 0))
   13482 		return 1;
   13483 
   13484 	return 0;
   13485 }
   13486 
   13487 static bool
   13488 wm_phy_resetisblocked(struct wm_softc *sc)
   13489 {
   13490 	bool blocked = false;
   13491 	uint32_t reg;
   13492 	int i = 0;
   13493 
   13494 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13495 		device_xname(sc->sc_dev), __func__));
   13496 
   13497 	switch (sc->sc_type) {
   13498 	case WM_T_ICH8:
   13499 	case WM_T_ICH9:
   13500 	case WM_T_ICH10:
   13501 	case WM_T_PCH:
   13502 	case WM_T_PCH2:
   13503 	case WM_T_PCH_LPT:
   13504 	case WM_T_PCH_SPT:
   13505 	case WM_T_PCH_CNP:
   13506 		do {
   13507 			reg = CSR_READ(sc, WMREG_FWSM);
   13508 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13509 				blocked = true;
   13510 				delay(10*1000);
   13511 				continue;
   13512 			}
   13513 			blocked = false;
   13514 		} while (blocked && (i++ < 30));
    13515 		return blocked;
   13517 	case WM_T_82571:
   13518 	case WM_T_82572:
   13519 	case WM_T_82573:
   13520 	case WM_T_82574:
   13521 	case WM_T_82583:
   13522 	case WM_T_80003:
   13523 		reg = CSR_READ(sc, WMREG_MANC);
    13524 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
    13525 			return true;
    13526 		else
    13527 			return false;
   13529 	default:
   13530 		/* no problem */
   13531 		break;
   13532 	}
   13533 
   13534 	return false;
   13535 }
   13536 
   13537 static void
   13538 wm_get_hw_control(struct wm_softc *sc)
   13539 {
   13540 	uint32_t reg;
   13541 
   13542 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13543 		device_xname(sc->sc_dev), __func__));
   13544 
   13545 	if (sc->sc_type == WM_T_82573) {
   13546 		reg = CSR_READ(sc, WMREG_SWSM);
   13547 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13548 	} else if (sc->sc_type >= WM_T_82571) {
   13549 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13550 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13551 	}
   13552 }
   13553 
   13554 static void
   13555 wm_release_hw_control(struct wm_softc *sc)
   13556 {
   13557 	uint32_t reg;
   13558 
   13559 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13560 		device_xname(sc->sc_dev), __func__));
   13561 
   13562 	if (sc->sc_type == WM_T_82573) {
   13563 		reg = CSR_READ(sc, WMREG_SWSM);
   13564 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13565 	} else if (sc->sc_type >= WM_T_82571) {
   13566 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13567 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13568 	}
   13569 }
   13570 
   13571 static void
   13572 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13573 {
   13574 	uint32_t reg;
   13575 
   13576 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13577 		device_xname(sc->sc_dev), __func__));
   13578 
   13579 	if (sc->sc_type < WM_T_PCH2)
   13580 		return;
   13581 
   13582 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13583 
   13584 	if (gate)
   13585 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13586 	else
   13587 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13588 
   13589 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13590 }
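          /*
           * Switch the PHY from SMBus-attached to PCIe (MDIO) access.  The
           * flow below: gate automatic PHY configuration, leave ULP, then
           * probe the PHY, forcing or unforcing SMBus mode and toggling
           * LANPHYPC as needed until it becomes accessible.
           */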
   13591 
   13592 static void
   13593 wm_smbustopci(struct wm_softc *sc)
   13594 {
   13595 	uint32_t fwsm, reg;
   13596 	int rv = 0;
   13597 
   13598 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13599 		device_xname(sc->sc_dev), __func__));
   13600 
   13601 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13602 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13603 
   13604 	/* Disable ULP */
   13605 	wm_ulp_disable(sc);
   13606 
   13607 	/* Acquire PHY semaphore */
   13608 	sc->phy.acquire(sc);
   13609 
   13610 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13611 	switch (sc->sc_type) {
   13612 	case WM_T_PCH_LPT:
   13613 	case WM_T_PCH_SPT:
   13614 	case WM_T_PCH_CNP:
   13615 		if (wm_phy_is_accessible_pchlan(sc))
   13616 			break;
   13617 
   13618 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13619 		reg |= CTRL_EXT_FORCE_SMBUS;
   13620 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13621 #if 0
   13622 		/* XXX Isn't this required??? */
   13623 		CSR_WRITE_FLUSH(sc);
   13624 #endif
   13625 		delay(50 * 1000);
   13626 		/* FALLTHROUGH */
   13627 	case WM_T_PCH2:
   13628 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13629 			break;
   13630 		/* FALLTHROUGH */
   13631 	case WM_T_PCH:
   13632 		if (sc->sc_type == WM_T_PCH)
   13633 			if ((fwsm & FWSM_FW_VALID) != 0)
   13634 				break;
   13635 
   13636 		if (wm_phy_resetisblocked(sc) == true) {
   13637 			printf("XXX reset is blocked(3)\n");
   13638 			break;
   13639 		}
   13640 
   13641 		wm_toggle_lanphypc_pch_lpt(sc);
   13642 
   13643 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13644 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13645 				break;
   13646 
   13647 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13648 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13649 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13650 
   13651 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13652 				break;
   13653 			rv = -1;
   13654 		}
   13655 		break;
   13656 	default:
   13657 		break;
   13658 	}
   13659 
   13660 	/* Release semaphore */
   13661 	sc->phy.release(sc);
   13662 
   13663 	if (rv == 0) {
   13664 		if (wm_phy_resetisblocked(sc)) {
   13665 			printf("XXX reset is blocked(4)\n");
   13666 			goto out;
   13667 		}
   13668 		wm_reset_phy(sc);
   13669 		if (wm_phy_resetisblocked(sc))
   13670 			printf("XXX reset is blocked(4)\n");
   13671 	}
   13672 
   13673 out:
   13674 	/*
   13675 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13676 	 */
   13677 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13678 		delay(10*1000);
   13679 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13680 	}
   13681 }
   13682 
   13683 static void
   13684 wm_init_manageability(struct wm_softc *sc)
   13685 {
   13686 
   13687 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13688 		device_xname(sc->sc_dev), __func__));
   13689 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13690 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13691 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13692 
   13693 		/* Disable hardware interception of ARP */
   13694 		manc &= ~MANC_ARP_EN;
   13695 
   13696 		/* Enable receiving management packets to the host */
   13697 		if (sc->sc_type >= WM_T_82571) {
   13698 			manc |= MANC_EN_MNG2HOST;
    13699 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13700 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13701 		}
   13702 
   13703 		CSR_WRITE(sc, WMREG_MANC, manc);
   13704 	}
   13705 }
   13706 
   13707 static void
   13708 wm_release_manageability(struct wm_softc *sc)
   13709 {
   13710 
   13711 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13712 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13713 
   13714 		manc |= MANC_ARP_EN;
   13715 		if (sc->sc_type >= WM_T_82571)
   13716 			manc &= ~MANC_EN_MNG2HOST;
   13717 
   13718 		CSR_WRITE(sc, WMREG_MANC, manc);
   13719 	}
   13720 }
   13721 
   13722 static void
   13723 wm_get_wakeup(struct wm_softc *sc)
   13724 {
   13725 
   13726 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13727 	switch (sc->sc_type) {
   13728 	case WM_T_82573:
   13729 	case WM_T_82583:
   13730 		sc->sc_flags |= WM_F_HAS_AMT;
   13731 		/* FALLTHROUGH */
   13732 	case WM_T_80003:
   13733 	case WM_T_82575:
   13734 	case WM_T_82576:
   13735 	case WM_T_82580:
   13736 	case WM_T_I350:
   13737 	case WM_T_I354:
   13738 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13739 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13740 		/* FALLTHROUGH */
   13741 	case WM_T_82541:
   13742 	case WM_T_82541_2:
   13743 	case WM_T_82547:
   13744 	case WM_T_82547_2:
   13745 	case WM_T_82571:
   13746 	case WM_T_82572:
   13747 	case WM_T_82574:
   13748 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13749 		break;
   13750 	case WM_T_ICH8:
   13751 	case WM_T_ICH9:
   13752 	case WM_T_ICH10:
   13753 	case WM_T_PCH:
   13754 	case WM_T_PCH2:
   13755 	case WM_T_PCH_LPT:
   13756 	case WM_T_PCH_SPT:
   13757 	case WM_T_PCH_CNP:
   13758 		sc->sc_flags |= WM_F_HAS_AMT;
   13759 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13760 		break;
   13761 	default:
   13762 		break;
   13763 	}
   13764 
   13765 	/* 1: HAS_MANAGE */
   13766 	if (wm_enable_mng_pass_thru(sc) != 0)
   13767 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13768 
   13769 	/*
    13770 	 * Note that the WOL flags are set after the EEPROM settings
    13771 	 * have been reset.
   13772 	 */
   13773 }
   13774 
   13775 /*
   13776  * Unconfigure Ultra Low Power mode.
   13777  * Only for I217 and newer (see below).
   13778  */
   13779 static void
   13780 wm_ulp_disable(struct wm_softc *sc)
   13781 {
   13782 	uint32_t reg;
   13783 	int i = 0;
   13784 
   13785 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13786 		device_xname(sc->sc_dev), __func__));
   13787 	/* Exclude old devices */
   13788 	if ((sc->sc_type < WM_T_PCH_LPT)
   13789 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13790 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13791 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13792 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13793 		return;
   13794 
   13795 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13796 		/* Request ME un-configure ULP mode in the PHY */
   13797 		reg = CSR_READ(sc, WMREG_H2ME);
   13798 		reg &= ~H2ME_ULP;
   13799 		reg |= H2ME_ENFORCE_SETTINGS;
   13800 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13801 
   13802 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13803 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13804 			if (i++ == 30) {
    13805 				printf("%s: %s timed out\n", device_xname(sc->sc_dev), __func__);
   13806 				return;
   13807 			}
   13808 			delay(10 * 1000);
   13809 		}
   13810 		reg = CSR_READ(sc, WMREG_H2ME);
   13811 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13812 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13813 
   13814 		return;
   13815 	}
   13816 
   13817 	/* Acquire semaphore */
   13818 	sc->phy.acquire(sc);
   13819 
   13820 	/* Toggle LANPHYPC */
   13821 	wm_toggle_lanphypc_pch_lpt(sc);
   13822 
   13823 	/* Unforce SMBus mode in PHY */
   13824 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13825 	if (reg == 0x0000 || reg == 0xffff) {
   13826 		uint32_t reg2;
   13827 
   13828 		printf("%s: Force SMBus first.\n", __func__);
   13829 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13830 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13831 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13832 		delay(50 * 1000);
   13833 
   13834 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13835 	}
   13836 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13837 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13838 
   13839 	/* Unforce SMBus mode in MAC */
   13840 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13841 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13842 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13843 
   13844 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13845 	reg |= HV_PM_CTRL_K1_ENA;
   13846 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13847 
   13848 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13849 	reg &= ~(I218_ULP_CONFIG1_IND
   13850 	    | I218_ULP_CONFIG1_STICKY_ULP
   13851 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13852 	    | I218_ULP_CONFIG1_WOL_HOST
   13853 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13854 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13855 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13856 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13857 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13858 	reg |= I218_ULP_CONFIG1_START;
   13859 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13860 
   13861 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13862 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13863 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13864 
   13865 	/* Release semaphore */
   13866 	sc->phy.release(sc);
   13867 	wm_gmii_reset(sc);
   13868 	delay(50 * 1000);
   13869 }
   13870 
   13871 /* WOL in the newer chipset interfaces (pchlan) */
   13872 static void
   13873 wm_enable_phy_wakeup(struct wm_softc *sc)
   13874 {
   13875 #if 0
   13876 	uint16_t preg;
   13877 
   13878 	/* Copy MAC RARs to PHY RARs */
   13879 
   13880 	/* Copy MAC MTA to PHY MTA */
   13881 
   13882 	/* Configure PHY Rx Control register */
   13883 
   13884 	/* Enable PHY wakeup in MAC register */
   13885 
   13886 	/* Configure and enable PHY wakeup in PHY registers */
   13887 
   13888 	/* Activate PHY wakeup */
   13889 
   13890 	/* XXX */
   13891 #endif
   13892 }
   13893 
   13894 /* Power down workaround on D3 */
   13895 static void
   13896 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13897 {
   13898 	uint32_t reg;
   13899 	int i;
   13900 
   13901 	for (i = 0; i < 2; i++) {
   13902 		/* Disable link */
   13903 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13904 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13905 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13906 
   13907 		/*
   13908 		 * Call gig speed drop workaround on Gig disable before
   13909 		 * accessing any PHY registers
   13910 		 */
   13911 		if (sc->sc_type == WM_T_ICH8)
   13912 			wm_gig_downshift_workaround_ich8lan(sc);
   13913 
   13914 		/* Write VR power-down enable */
   13915 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13916 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13917 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13918 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13919 
   13920 		/* Read it back and test */
   13921 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13922 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13923 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13924 			break;
   13925 
   13926 		/* Issue PHY reset and repeat at most one more time */
   13927 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13928 	}
   13929 }
   13930 
   13931 static void
   13932 wm_enable_wakeup(struct wm_softc *sc)
   13933 {
   13934 	uint32_t reg, pmreg;
   13935 	pcireg_t pmode;
   13936 
   13937 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13938 		device_xname(sc->sc_dev), __func__));
   13939 
   13940 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13941 		&pmreg, NULL) == 0)
   13942 		return;
   13943 
   13944 	/* Advertise the wakeup capability */
   13945 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13946 	    | CTRL_SWDPIN(3));
   13947 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13948 
   13949 	/* ICH workaround */
   13950 	switch (sc->sc_type) {
   13951 	case WM_T_ICH8:
   13952 	case WM_T_ICH9:
   13953 	case WM_T_ICH10:
   13954 	case WM_T_PCH:
   13955 	case WM_T_PCH2:
   13956 	case WM_T_PCH_LPT:
   13957 	case WM_T_PCH_SPT:
   13958 	case WM_T_PCH_CNP:
   13959 		/* Disable gig during WOL */
   13960 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13961 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13962 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13963 		if (sc->sc_type == WM_T_PCH)
   13964 			wm_gmii_reset(sc);
   13965 
   13966 		/* Power down workaround */
   13967 		if (sc->sc_phytype == WMPHY_82577) {
   13968 			struct mii_softc *child;
   13969 
   13970 			/* Assume that the PHY is copper */
   13971 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13972 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13973 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13974 				    (768 << 5) | 25, 0x0444); /* magic num */
   13975 		}
   13976 		break;
   13977 	default:
   13978 		break;
   13979 	}
   13980 
   13981 	/* Keep the laser running on fiber adapters */
   13982 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13983 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13984 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13985 		reg |= CTRL_EXT_SWDPIN(3);
   13986 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13987 	}
   13988 
   13989 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13990 #if 0	/* for the multicast packet */
   13991 	reg |= WUFC_MC;
   13992 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13993 #endif
   13994 
   13995 	if (sc->sc_type >= WM_T_PCH)
   13996 		wm_enable_phy_wakeup(sc);
   13997 	else {
   13998 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13999 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14000 	}
   14001 
   14002 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14003 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14004 		|| (sc->sc_type == WM_T_PCH2))
   14005 		    && (sc->sc_phytype == WMPHY_IGP_3))
   14006 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14007 
   14008 	/* Request PME */
   14009 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14010 #if 0
   14011 	/* Disable WOL */
   14012 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14013 #else
   14014 	/* For WOL */
   14015 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14016 #endif
   14017 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14018 }
   14019 
   14020 /* Disable ASPM L0s and/or L1 for workaround */
   14021 static void
   14022 wm_disable_aspm(struct wm_softc *sc)
   14023 {
   14024 	pcireg_t reg, mask = 0;
    14025 	const char *str = "";
   14026 
   14027 	/*
    14028 	 * Only for PCIe devices which have the PCIe capability structure
    14029 	 * in their PCI config space.
   14030 	 */
   14031 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14032 		return;
   14033 
   14034 	switch (sc->sc_type) {
   14035 	case WM_T_82571:
   14036 	case WM_T_82572:
   14037 		/*
   14038 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14039 		 * State Power management L1 State (ASPM L1).
   14040 		 */
   14041 		mask = PCIE_LCSR_ASPM_L1;
   14042 		str = "L1 is";
   14043 		break;
   14044 	case WM_T_82573:
   14045 	case WM_T_82574:
   14046 	case WM_T_82583:
   14047 		/*
   14048 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14049 		 *
    14050 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14051 		 * some chipsets.  Their documents say that disabling L0s
    14052 		 * on those specific chipsets is sufficient, but we follow
    14053 		 * what the Intel em driver does.
   14054 		 *
   14055 		 * References:
   14056 		 * Errata 8 of the Specification Update of i82573.
   14057 		 * Errata 20 of the Specification Update of i82574.
   14058 		 * Errata 9 of the Specification Update of i82583.
   14059 		 */
   14060 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14061 		str = "L0s and L1 are";
   14062 		break;
   14063 	default:
   14064 		return;
   14065 	}
   14066 
   14067 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14068 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14069 	reg &= ~mask;
   14070 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14071 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14072 
   14073 	/* Print only in wm_attach() */
   14074 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14075 		aprint_verbose_dev(sc->sc_dev,
    14076 		    "ASPM %s disabled to work around the errata.\n", str);
   14078 }
   14079 
   14080 /* LPLU */
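          /*
           * LPLU (Low Power Link Up) lets the PHY negotiate the lowest
           * advertised speed first to save power.  It is disabled for D0
           * here so that normal operation is not slowed down.
           */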
   14081 
   14082 static void
   14083 wm_lplu_d0_disable(struct wm_softc *sc)
   14084 {
   14085 	struct mii_data *mii = &sc->sc_mii;
   14086 	uint32_t reg;
   14087 
   14088 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14089 		device_xname(sc->sc_dev), __func__));
   14090 
   14091 	if (sc->sc_phytype == WMPHY_IFE)
   14092 		return;
   14093 
   14094 	switch (sc->sc_type) {
   14095 	case WM_T_82571:
   14096 	case WM_T_82572:
   14097 	case WM_T_82573:
   14098 	case WM_T_82575:
   14099 	case WM_T_82576:
   14100 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14101 		reg &= ~PMR_D0_LPLU;
   14102 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14103 		break;
   14104 	case WM_T_82580:
   14105 	case WM_T_I350:
   14106 	case WM_T_I210:
   14107 	case WM_T_I211:
   14108 		reg = CSR_READ(sc, WMREG_PHPM);
   14109 		reg &= ~PHPM_D0A_LPLU;
   14110 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14111 		break;
   14112 	case WM_T_82574:
   14113 	case WM_T_82583:
   14114 	case WM_T_ICH8:
   14115 	case WM_T_ICH9:
   14116 	case WM_T_ICH10:
   14117 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14118 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14119 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14120 		CSR_WRITE_FLUSH(sc);
   14121 		break;
   14122 	case WM_T_PCH:
   14123 	case WM_T_PCH2:
   14124 	case WM_T_PCH_LPT:
   14125 	case WM_T_PCH_SPT:
   14126 	case WM_T_PCH_CNP:
   14127 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14128 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14129 		if (wm_phy_resetisblocked(sc) == false)
   14130 			reg |= HV_OEM_BITS_ANEGNOW;
   14131 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14132 		break;
   14133 	default:
   14134 		break;
   14135 	}
   14136 }
   14137 
   14138 /* EEE */
   14139 
   14140 static void
   14141 wm_set_eee_i350(struct wm_softc *sc)
   14142 {
   14143 	uint32_t ipcnfg, eeer;
   14144 
   14145 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14146 	eeer = CSR_READ(sc, WMREG_EEER);
   14147 
   14148 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14149 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14150 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14151 		    | EEER_LPI_FC);
   14152 	} else {
   14153 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14154 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14155 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14156 		    | EEER_LPI_FC);
   14157 	}
   14158 
   14159 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14160 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14161 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14162 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14163 }
   14164 
   14165 /*
   14166  * Workarounds (mainly PHY related).
   14167  * Basically, PHY's workarounds are in the PHY drivers.
   14168  */
   14169 
   14170 /* Work-around for 82566 Kumeran PCS lock loss */
   14171 static void
   14172 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14173 {
   14174 	struct mii_data *mii = &sc->sc_mii;
   14175 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14176 	int i;
   14177 	int reg;
   14178 
   14179 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14180 		device_xname(sc->sc_dev), __func__));
   14181 
   14182 	/* If the link is not up, do nothing */
   14183 	if ((status & STATUS_LU) == 0)
   14184 		return;
   14185 
    14186 	/* Nothing to do if the link speed is not 1Gbps */
   14187 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14188 		return;
   14189 
   14190 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14191 	for (i = 0; i < 10; i++) {
   14192 		/* read twice */
   14193 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14194 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14195 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14196 			goto out;	/* GOOD! */
   14197 
   14198 		/* Reset the PHY */
   14199 		wm_reset_phy(sc);
   14200 		delay(5*1000);
   14201 	}
   14202 
   14203 	/* Disable GigE link negotiation */
   14204 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14205 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14206 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14207 
   14208 	/*
   14209 	 * Call gig speed drop workaround on Gig disable before accessing
   14210 	 * any PHY registers.
   14211 	 */
   14212 	wm_gig_downshift_workaround_ich8lan(sc);
   14213 
   14214 out:
   14215 	return;
   14216 }
   14217 
   14218 /* WOL from S5 stops working */
   14219 static void
   14220 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14221 {
   14222 	uint16_t kmreg;
   14223 
   14224 	/* Only for igp3 */
   14225 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14226 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14227 			return;
   14228 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14229 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14230 			return;
   14231 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14232 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14233 	}
   14234 }
   14235 
   14236 /*
   14237  * Workaround for pch's PHYs
   14238  * XXX should be moved to new PHY driver?
   14239  */
   14240 static void
   14241 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14242 {
   14243 
   14244 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14245 		device_xname(sc->sc_dev), __func__));
   14246 	KASSERT(sc->sc_type == WM_T_PCH);
   14247 
   14248 	if (sc->sc_phytype == WMPHY_82577)
   14249 		wm_set_mdio_slow_mode_hv(sc);
   14250 
   14251 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14252 
    14253 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   14254 
   14255 	/* 82578 */
   14256 	if (sc->sc_phytype == WMPHY_82578) {
   14257 		struct mii_softc *child;
   14258 
   14259 		/*
   14260 		 * Return registers to default by doing a soft reset then
   14261 		 * writing 0x3140 to the control register
   14262 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14263 		 */
   14264 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14265 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14266 			PHY_RESET(child);
   14267 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14268 			    0x3140);
   14269 		}
   14270 	}
   14271 
   14272 	/* Select page 0 */
   14273 	sc->phy.acquire(sc);
   14274 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14275 	sc->phy.release(sc);
   14276 
   14277 	/*
   14278 	 * Configure the K1 Si workaround during phy reset assuming there is
   14279 	 * link so that it disables K1 if link is in 1Gbps.
   14280 	 */
   14281 	wm_k1_gig_workaround_hv(sc, 1);
   14282 }
   14283 
   14284 static void
   14285 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14286 {
   14287 
   14288 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14289 		device_xname(sc->sc_dev), __func__));
   14290 	KASSERT(sc->sc_type == WM_T_PCH2);
   14291 
   14292 	wm_set_mdio_slow_mode_hv(sc);
   14293 }
   14294 
   14295 static int
   14296 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14297 {
   14298 	int k1_enable = sc->sc_nvm_k1_enabled;
   14299 
   14300 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14301 		device_xname(sc->sc_dev), __func__));
   14302 
   14303 	if (sc->phy.acquire(sc) != 0)
   14304 		return -1;
   14305 
   14306 	if (link) {
   14307 		k1_enable = 0;
   14308 
   14309 		/* Link stall fix for link up */
   14310 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   14311 	} else {
   14312 		/* Link stall fix for link down */
   14313 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   14314 	}
   14315 
   14316 	wm_configure_k1_ich8lan(sc, k1_enable);
   14317 	sc->phy.release(sc);
   14318 
   14319 	return 0;
   14320 }
   14321 
   14322 static void
   14323 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14324 {
   14325 	uint32_t reg;
   14326 
   14327 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14328 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14329 	    reg | HV_KMRN_MDIO_SLOW);
   14330 }
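          /*
           * Enable or disable K1 (a power saving state of the Kumeran
           * MAC/PHY interconnect) via the K1_CONFIG Kumeran register, then
           * briefly force the MAC speed setting so that the change takes
           * effect.
           */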
   14331 
   14332 static void
   14333 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14334 {
   14335 	uint32_t ctrl, ctrl_ext, tmp;
   14336 	uint16_t kmreg;
   14337 	int rv;
   14338 
   14339 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14340 	if (rv != 0)
   14341 		return;
   14342 
   14343 	if (k1_enable)
   14344 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14345 	else
   14346 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14347 
   14348 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14349 	if (rv != 0)
   14350 		return;
   14351 
   14352 	delay(20);
   14353 
   14354 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14355 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14356 
   14357 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14358 	tmp |= CTRL_FRCSPD;
   14359 
   14360 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14361 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14362 	CSR_WRITE_FLUSH(sc);
   14363 	delay(20);
   14364 
   14365 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14366 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14367 	CSR_WRITE_FLUSH(sc);
   14368 	delay(20);
   14369 
   14370 	return;
   14371 }
   14372 
   14373 /* special case - for 82575 - need to do manual init ... */
   14374 static void
   14375 wm_reset_init_script_82575(struct wm_softc *sc)
   14376 {
   14377 	/*
    14378 	 * Remark: this is untested code - we have no board without EEPROM;
    14379 	 * same setup as mentioned in the FreeBSD driver for the i82575.
   14380 	 */
   14381 
   14382 	/* SerDes configuration via SERDESCTRL */
   14383 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14384 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14385 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14386 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14387 
   14388 	/* CCM configuration via CCMCTL register */
   14389 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14390 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14391 
   14392 	/* PCIe lanes configuration */
   14393 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14394 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14395 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14396 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14397 
   14398 	/* PCIe PLL Configuration */
   14399 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14400 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14401 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14402 }
   14403 
   14404 static void
   14405 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14406 {
   14407 	uint32_t reg;
   14408 	uint16_t nvmword;
   14409 	int rv;
   14410 
   14411 	if (sc->sc_type != WM_T_82580)
   14412 		return;
   14413 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14414 		return;
   14415 
   14416 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14417 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14418 	if (rv != 0) {
   14419 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14420 		    __func__);
   14421 		return;
   14422 	}
   14423 
   14424 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14425 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14426 		reg |= MDICNFG_DEST;
   14427 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14428 		reg |= MDICNFG_COM_MDIO;
   14429 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14430 }
   14431 
   14432 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14433 
   14434 static bool
   14435 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14436 {
   14437 	int i;
   14438 	uint32_t reg;
   14439 	uint16_t id1, id2;
   14440 
   14441 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14442 		device_xname(sc->sc_dev), __func__));
   14443 	id1 = id2 = 0xffff;
   14444 	for (i = 0; i < 2; i++) {
   14445 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14446 		if (MII_INVALIDID(id1))
   14447 			continue;
   14448 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14449 		if (MII_INVALIDID(id2))
   14450 			continue;
   14451 		break;
   14452 	}
   14453 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   14454 		goto out;
   14455 	}
   14456 
   14457 	if (sc->sc_type < WM_T_PCH_LPT) {
   14458 		sc->phy.release(sc);
   14459 		wm_set_mdio_slow_mode_hv(sc);
   14460 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14461 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14462 		sc->phy.acquire(sc);
   14463 	}
   14464 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14465 		printf("XXX return with false\n");
   14466 		return false;
   14467 	}
   14468 out:
   14469 	if (sc->sc_type >= WM_T_PCH_LPT) {
   14470 		/* Only unforce SMBus if ME is not active */
   14471 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14472 			/* Unforce SMBus mode in PHY */
   14473 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14474 			    CV_SMB_CTRL);
   14475 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14476 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14477 			    CV_SMB_CTRL, reg);
   14478 
   14479 			/* Unforce SMBus mode in MAC */
   14480 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14481 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14482 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14483 		}
   14484 	}
   14485 	return true;
   14486 }
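          /*
           * Toggle the LANPHYPC pin to force the PHY out of its current
           * power state: drive the value low with the override bit set,
           * release the override, then wait for the PHY to come back (on
           * LPT and newer by polling CTRL_EXT_LPCD).
           */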
   14487 
   14488 static void
   14489 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14490 {
   14491 	uint32_t reg;
   14492 	int i;
   14493 
   14494 	/* Set PHY Config Counter to 50msec */
   14495 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14496 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14497 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14498 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14499 
   14500 	/* Toggle LANPHYPC */
   14501 	reg = CSR_READ(sc, WMREG_CTRL);
   14502 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14503 	reg &= ~CTRL_LANPHYPC_VALUE;
   14504 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14505 	CSR_WRITE_FLUSH(sc);
   14506 	delay(1000);
   14507 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14508 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14509 	CSR_WRITE_FLUSH(sc);
   14510 
   14511 	if (sc->sc_type < WM_T_PCH_LPT)
   14512 		delay(50 * 1000);
   14513 	else {
   14514 		i = 20;
   14515 
   14516 		do {
   14517 			delay(5 * 1000);
   14518 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14519 		    && i--);
   14520 
   14521 		delay(30 * 1000);
   14522 	}
   14523 }
   14524 
   14525 static int
   14526 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14527 {
   14528 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14529 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14530 	uint32_t rxa;
   14531 	uint16_t scale = 0, lat_enc = 0;
   14532 	int32_t obff_hwm = 0;
   14533 	int64_t lat_ns, value;
   14534 
   14535 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14536 		device_xname(sc->sc_dev), __func__));
   14537 
   14538 	if (link) {
   14539 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14540 		uint32_t status;
   14541 		uint16_t speed;
   14542 		pcireg_t preg;
   14543 
   14544 		status = CSR_READ(sc, WMREG_STATUS);
   14545 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14546 		case STATUS_SPEED_10:
   14547 			speed = 10;
   14548 			break;
   14549 		case STATUS_SPEED_100:
   14550 			speed = 100;
   14551 			break;
   14552 		case STATUS_SPEED_1000:
   14553 			speed = 1000;
   14554 			break;
   14555 		default:
   14556 			device_printf(sc->sc_dev, "Unknown speed "
   14557 			    "(status = %08x)\n", status);
   14558 			return -1;
   14559 		}
   14560 
   14561 		/* Rx Packet Buffer Allocation size (KB) */
   14562 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14563 
   14564 		/*
   14565 		 * Determine the maximum latency tolerated by the device.
   14566 		 *
   14567 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14568 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14569 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14570 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14571 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14572 		 */
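          		/*
          		 * A worked example with assumed (not datasheet) numbers:
          		 * rxa = 24 KB, a 1500 byte MTU and a 1000 Mb/s link give
          		 * lat_ns = (24 * 1024 - 2 * 1514) * 8 * 1000 / 1000
          		 * = 172384 ns.  Dividing by 32 (rounding up) twice
          		 * brings the value under 1024 (172384 -> 5387 -> 169),
          		 * so scale = 2 and the encoding means
          		 * 169 * 2^10 ns ~= 173 us.
          		 */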
   14573 		lat_ns = ((int64_t)rxa * 1024 -
   14574 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14575 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14576 		if (lat_ns < 0)
   14577 			lat_ns = 0;
   14578 		else
   14579 			lat_ns /= speed;
   14580 		value = lat_ns;
   14581 
   14582 		while (value > LTRV_VALUE) {
    14583 			scale++;
   14584 			value = howmany(value, __BIT(5));
   14585 		}
   14586 		if (scale > LTRV_SCALE_MAX) {
   14587 			printf("%s: Invalid LTR latency scale %d\n",
   14588 			    device_xname(sc->sc_dev), scale);
   14589 			return -1;
   14590 		}
   14591 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14592 
   14593 		/* Determine the maximum latency tolerated by the platform */
   14594 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14595 		    WM_PCI_LTR_CAP_LPT);
   14596 		max_snoop = preg & 0xffff;
   14597 		max_nosnoop = preg >> 16;
   14598 
   14599 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14600 
   14601 		if (lat_enc > max_ltr_enc) {
   14602 			lat_enc = max_ltr_enc;
   14603 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14604 			    * PCI_LTR_SCALETONS(
   14605 				    __SHIFTOUT(lat_enc,
   14606 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14607 		}
   14608 
   14609 		if (lat_ns) {
   14610 			lat_ns *= speed * 1000;
   14611 			lat_ns /= 8;
   14612 			lat_ns /= 1000000000;
   14613 			obff_hwm = (int32_t)(rxa - lat_ns);
   14614 		}
   14615 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   14616 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    14617 			    " (rxa = %d, lat_ns = %d)\n",
   14618 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14619 			return -1;
   14620 		}
   14621 	}
   14622 	/* Snoop and No-Snoop latencies the same */
   14623 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14624 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14625 
   14626 	/* Set OBFF high water mark */
   14627 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14628 	reg |= obff_hwm;
   14629 	CSR_WRITE(sc, WMREG_SVT, reg);
   14630 
   14631 	/* Enable OBFF */
   14632 	reg = CSR_READ(sc, WMREG_SVCR);
   14633 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14634 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14635 
   14636 	return 0;
   14637 }
   14638 
   14639 /*
   14640  * I210 Errata 25 and I211 Errata 10
   14641  * Slow System Clock.
   14642  */
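          /*
           * Summary of the sequence below: read the PHY's PLL frequency
           * register; while it reports unconfigured, reset the internal PHY,
           * write a patched iNVM autoload word via EEARBC, bounce the device
           * through D3/D0 using the PCI PMCSR, then restore the original
           * word.  Retry up to WM_MAX_PLL_TRIES times.
           */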
   14643 static void
   14644 wm_pll_workaround_i210(struct wm_softc *sc)
   14645 {
   14646 	uint32_t mdicnfg, wuc;
   14647 	uint32_t reg;
   14648 	pcireg_t pcireg;
   14649 	uint32_t pmreg;
   14650 	uint16_t nvmword, tmp_nvmword;
   14651 	int phyval;
   14652 	bool wa_done = false;
   14653 	int i;
   14654 
   14655 	/* Save WUC and MDICNFG registers */
   14656 	wuc = CSR_READ(sc, WMREG_WUC);
   14657 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14658 
   14659 	reg = mdicnfg & ~MDICNFG_DEST;
   14660 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14661 
   14662 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14663 		nvmword = INVM_DEFAULT_AL;
   14664 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14665 
   14666 	/* Get Power Management cap offset */
   14667 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14668 		&pmreg, NULL) == 0)
   14669 		return;
   14670 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14671 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14672 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14673 
   14674 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14675 			break; /* OK */
   14676 		}
   14677 
   14678 		wa_done = true;
   14679 		/* Directly reset the internal PHY */
   14680 		reg = CSR_READ(sc, WMREG_CTRL);
   14681 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14682 
   14683 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14684 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14685 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14686 
   14687 		CSR_WRITE(sc, WMREG_WUC, 0);
   14688 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14689 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14690 
   14691 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14692 		    pmreg + PCI_PMCSR);
   14693 		pcireg |= PCI_PMCSR_STATE_D3;
   14694 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14695 		    pmreg + PCI_PMCSR, pcireg);
   14696 		delay(1000);
   14697 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14698 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14699 		    pmreg + PCI_PMCSR, pcireg);
   14700 
   14701 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14702 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14703 
   14704 		/* Restore WUC register */
   14705 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14706 	}
   14707 
   14708 	/* Restore MDICNFG setting */
   14709 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14710 	if (wa_done)
   14711 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14712 }
   14713 
   14714 static void
   14715 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14716 {
   14717 	uint32_t reg;
   14718 
   14719 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14720 		device_xname(sc->sc_dev), __func__));
   14721 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14722 
   14723 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14724 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14725 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14726 
   14727 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14728 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14729 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14730 }
   14731