/*	$NetBSD: if_wm.c,v 1.576 2018/04/23 01:35:25 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX multi-queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.576 2018/04/23 01:35:25 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
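
/*
 * Illustrative only (not compiled): a typical DPRINTF() call site.  The
 * first argument selects one or more WM_DEBUG_* categories; the second
 * is a parenthesized printf-style argument list.  "link_up" here is a
 * hypothetical local used purely for the example.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: link is %s\n",
		device_xname(sc->sc_dev), link_up ? "up" : "down"));
#endif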

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver supports.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
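
/*
 * A minimal sketch (not compiled): WM_NTXDESC() is a power of two, so
 * WM_NEXTTX() can wrap the ring with a mask instead of a modulo.  With
 * 4096 descriptors, (4095 + 1) & 4095 == 0.  The job ring wraps the
 * same way via WM_NEXTTXS().
 */
#if 0
	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);	  /* advance + wrap */
	txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); /* job ring, too */
#endif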

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer per normal-sized
 * packet; a full-sized jumbo packet consumes 5 Rx buffers.  We allocate
 * 256 receive descriptors, each with a 2k buffer (MCLBYTES), which
 * gives us room for about 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
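
/*
 * Worked example (assuming a 9018 byte jumbo frame): such a frame spans
 * howmany(9018, MCLBYTES) == 5 clusters, so the 256 descriptors above
 * cover 256 / 5 == 51, i.e. roughly 50, in-flight jumbo packets.
 */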

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
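
/*
 * Illustrative only (not compiled): how the helpers above pair up.
 * WM_Q_EVCNT_DEFINE() declares the storage inside a queue structure;
 * the attach macros register the counter under a generated per-queue
 * name such as "txq00txdw".  "qidx" and "xname" are hypothetical.
 */
#if 0
	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, qidx, xname);
	/* ... and later, on detach: */
	WM_Q_EVCNT_DETACH(txq, txdw, txq, qidx);
#endif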

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
	WM_Q_EVCNT_DEFINE(rxq, rxdefer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of this Tx/Rx queue pair */
	int wmq_intr_idx;		/* index into the MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * - legacy and MSI use sc_intrs[0] only
					 * - MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index into the MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
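
/*
 * A minimal sketch (not compiled): the usual pattern around shared
 * softc state.  Without WM_MPSAFE, sc_core_lock is NULL and the macros
 * degrade to no-ops, with WM_CORE_LOCKED() always true.
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify sc_* state ... */
	WM_CORE_UNLOCK(sc);
#endif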

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
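
/*
 * A minimal sketch (not compiled): assembling a frame that spans
 * several Rx buffers.  Each buffer is appended with WM_RXCHAIN_LINK();
 * when the final descriptor of the frame is seen, the chain is handed
 * up and the state is reset.  "is_last" is a hypothetical flag.
 */
#if 0
	WM_RXCHAIN_LINK(rxq, m);
	rxq->rxq_len += m->m_len;
	if (is_last) {
		struct mbuf *head = rxq->rxq_head;
		WM_RXCHAIN_RESET(rxq);
		/* ... pass "head" up the stack ... */
	}
#endif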

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
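
/*
 * A minimal sketch (not compiled): a read-modify-write of a device
 * register.  CSR_WRITE_FLUSH() reads STATUS to push posted writes out
 * before a following delay.  CTRL_PHY_RESET is assumed to be the PHY
 * reset bit from if_wmreg.h.
 */
#if 0
	uint32_t ctrl = CSR_READ(sc, WMREG_CTRL);
	CSR_WRITE(sc, WMREG_CTRL, ctrl | CTRL_PHY_RESET);
	CSR_WRITE_FLUSH(sc);
	delay(100);
#endif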

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
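
/*
 * A minimal sketch (not compiled): the LO/HI halves above are what the
 * driver loads into the descriptor base address registers; with a
 * 32-bit bus_addr_t the high half is simply 0.  Recombining them
 * yields the original descriptor ring address.
 */
#if 0
	uint64_t base = ((uint64_t)WM_CDTXADDR_HI(txq, 0) << 32) |
	    WM_CDTXADDR_LO(txq, 0);	/* == WM_CDTXADDR(txq, 0) */
#endif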

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions:
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* For use with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds are implemented in the PHY drivers themselves.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
   1335 	  WM_T_82576,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1337 	  "82576 1000BaseX Ethernet",
   1338 	  WM_T_82576,		WMP_F_FIBER },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1341 	  "82576 gigabit Ethernet (SERDES)",
   1342 	  WM_T_82576,		WMP_F_SERDES },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1345 	  "82576 quad-1000BaseT Ethernet",
   1346 	  WM_T_82576,		WMP_F_COPPER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1349 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1350 	  WM_T_82576,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1353 	  "82576 gigabit Ethernet",
   1354 	  WM_T_82576,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1357 	  "82576 gigabit Ethernet (SERDES)",
   1358 	  WM_T_82576,		WMP_F_SERDES },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1360 	  "82576 quad-gigabit Ethernet (SERDES)",
   1361 	  WM_T_82576,		WMP_F_SERDES },
   1362 
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1364 	  "82580 1000BaseT Ethernet",
   1365 	  WM_T_82580,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1367 	  "82580 1000BaseX Ethernet",
   1368 	  WM_T_82580,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1371 	  "82580 1000BaseT Ethernet (SERDES)",
   1372 	  WM_T_82580,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1375 	  "82580 gigabit Ethernet (SGMII)",
   1376 	  WM_T_82580,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1378 	  "82580 dual-1000BaseT Ethernet",
   1379 	  WM_T_82580,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1382 	  "82580 quad-1000BaseX Ethernet",
   1383 	  WM_T_82580,		WMP_F_FIBER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1386 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1387 	  WM_T_82580,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1390 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1391 	  WM_T_82580,		WMP_F_SERDES },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1394 	  "DH89XXCC 1000BASE-KX Ethernet",
   1395 	  WM_T_82580,		WMP_F_SERDES },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1398 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1399 	  WM_T_82580,		WMP_F_SERDES },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1402 	  "I350 Gigabit Network Connection",
   1403 	  WM_T_I350,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1406 	  "I350 Gigabit Fiber Network Connection",
   1407 	  WM_T_I350,		WMP_F_FIBER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1410 	  "I350 Gigabit Backplane Connection",
   1411 	  WM_T_I350,		WMP_F_SERDES },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1414 	  "I350 Quad Port Gigabit Ethernet",
   1415 	  WM_T_I350,		WMP_F_SERDES },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1418 	  "I350 Gigabit Connection",
   1419 	  WM_T_I350,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1422 	  "I354 Gigabit Ethernet (KX)",
   1423 	  WM_T_I354,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1426 	  "I354 Gigabit Ethernet (SGMII)",
   1427 	  WM_T_I354,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1430 	  "I354 Gigabit Ethernet (2.5G)",
   1431 	  WM_T_I354,		WMP_F_COPPER },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1434 	  "I210-T1 Ethernet Server Adapter",
   1435 	  WM_T_I210,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1438 	  "I210 Ethernet (Copper OEM)",
   1439 	  WM_T_I210,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1442 	  "I210 Ethernet (Copper IT)",
   1443 	  WM_T_I210,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1446 	  "I210 Ethernet (FLASH less)",
   1447 	  WM_T_I210,		WMP_F_COPPER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1450 	  "I210 Gigabit Ethernet (Fiber)",
   1451 	  WM_T_I210,		WMP_F_FIBER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1454 	  "I210 Gigabit Ethernet (SERDES)",
   1455 	  WM_T_I210,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1458 	  "I210 Gigabit Ethernet (FLASH less)",
   1459 	  WM_T_I210,		WMP_F_SERDES },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1462 	  "I210 Gigabit Ethernet (SGMII)",
   1463 	  WM_T_I210,		WMP_F_COPPER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1466 	  "I211 Ethernet (COPPER)",
   1467 	  WM_T_I211,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1469 	  "I217 V Ethernet Connection",
   1470 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1472 	  "I217 LM Ethernet Connection",
   1473 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1475 	  "I218 V Ethernet Connection",
   1476 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1478 	  "I218 V Ethernet Connection",
   1479 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1481 	  "I218 V Ethernet Connection",
   1482 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1484 	  "I218 LM Ethernet Connection",
   1485 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1487 	  "I218 LM Ethernet Connection",
   1488 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1490 	  "I218 LM Ethernet Connection",
   1491 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1493 	  "I219 V Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1496 	  "I219 V Ethernet Connection",
   1497 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1499 	  "I219 V Ethernet Connection",
   1500 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1502 	  "I219 V Ethernet Connection",
   1503 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1505 	  "I219 LM Ethernet Connection",
   1506 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1508 	  "I219 LM Ethernet Connection",
   1509 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1511 	  "I219 LM Ethernet Connection",
   1512 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1514 	  "I219 LM Ethernet Connection",
   1515 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1517 	  "I219 LM Ethernet Connection",
   1518 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1520 	  "I219 V Ethernet Connection",
   1521 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1523 	  "I219 V Ethernet Connection",
   1524 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1526 	  "I219 LM Ethernet Connection",
   1527 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1529 	  "I219 LM Ethernet Connection",
   1530 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1531 	{ 0,			0,
   1532 	  NULL,
   1533 	  0,			0 },
   1534 };
   1535 
/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
   1540 
   1541 #if 0 /* Not currently used */
   1542 static inline uint32_t
   1543 wm_io_read(struct wm_softc *sc, int reg)
   1544 {
   1545 
   1546 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1547 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1548 }
   1549 #endif
   1550 
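/*
 * Indirect register write via the I/O BAR: the register offset is
 * written at offset 0 and the value at offset 4 (the IOADDR/IODATA
 * window pair), as in wm_io_read() above.
 */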
   1551 static inline void
   1552 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1553 {
   1554 
   1555 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1556 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1557 }
   1558 
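/*
 * Write an 8-bit value to a sub-register behind one of the i82575
 * serial control registers, then poll until the hardware sets the
 * READY bit or SCTL_CTL_POLL_TIMEOUT iterations elapse.
 */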
   1559 static inline void
   1560 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1561     uint32_t data)
   1562 {
   1563 	uint32_t regval;
   1564 	int i;
   1565 
   1566 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1567 
   1568 	CSR_WRITE(sc, reg, regval);
   1569 
   1570 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1571 		delay(5);
   1572 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1573 			break;
   1574 	}
   1575 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1576 		aprint_error("%s: WARNING:"
   1577 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1578 		    device_xname(sc->sc_dev), reg);
   1579 	}
   1580 }
   1581 
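/*
 * Store a bus address into the low/high words of a descriptor
 * address, in little-endian byte order.  The high word is non-zero
 * only when bus addresses are 64-bit.
 */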
   1582 static inline void
   1583 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1584 {
   1585 	wa->wa_low = htole32(v & 0xffffffffU);
   1586 	if (sizeof(bus_addr_t) == 8)
   1587 		wa->wa_high = htole32((uint64_t) v >> 32);
   1588 	else
   1589 		wa->wa_high = 0;
   1590 }
   1591 
   1592 /*
   1593  * Descriptor sync/init functions.
   1594  */
   1595 static inline void
   1596 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1597 {
   1598 	struct wm_softc *sc = txq->txq_sc;
   1599 
   1600 	/* If it will wrap around, sync to the end of the ring. */
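	/* E.g. start 250, num 10 on a 256-entry ring: 250-255, then 0-3. */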
   1601 	if ((start + num) > WM_NTXDESC(txq)) {
   1602 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1603 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1604 		    (WM_NTXDESC(txq) - start), ops);
   1605 		num -= (WM_NTXDESC(txq) - start);
   1606 		start = 0;
   1607 	}
   1608 
   1609 	/* Now sync whatever is left. */
   1610 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1611 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1612 }
   1613 
   1614 static inline void
   1615 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1616 {
   1617 	struct wm_softc *sc = rxq->rxq_sc;
   1618 
   1619 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1620 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1621 }
   1622 
   1623 static inline void
   1624 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1625 {
   1626 	struct wm_softc *sc = rxq->rxq_sc;
   1627 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1628 	struct mbuf *m = rxs->rxs_mbuf;
   1629 
   1630 	/*
   1631 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1632 	 * so that the payload after the Ethernet header is aligned
   1633 	 * to a 4-byte boundary.
	 *
   1635 	 * XXX BRAINDAMAGE ALERT!
   1636 	 * The stupid chip uses the same size for every buffer, which
   1637 	 * is set in the Receive Control register.  We are using the 2K
   1638 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1639 	 * reason, we can't "scoot" packets longer than the standard
   1640 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1641 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1642 	 * the upper layer copy the headers.
   1643 	 */
   1644 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1645 
   1646 	if (sc->sc_type == WM_T_82574) {
   1647 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1648 		rxd->erx_data.erxd_addr =
   1649 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1650 		rxd->erx_data.erxd_dd = 0;
   1651 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1652 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1653 
   1654 		rxd->nqrx_data.nrxd_paddr =
   1655 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1656 		/* Currently, split header is not supported. */
   1657 		rxd->nqrx_data.nrxd_haddr = 0;
   1658 	} else {
   1659 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1660 
   1661 		wm_set_dma_addr(&rxd->wrx_addr,
   1662 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1663 		rxd->wrx_len = 0;
   1664 		rxd->wrx_cksum = 0;
   1665 		rxd->wrx_status = 0;
   1666 		rxd->wrx_errors = 0;
   1667 		rxd->wrx_special = 0;
   1668 	}
   1669 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1670 
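	/* Hand the descriptor to the hardware by advancing the ring tail. */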
   1671 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1672 }
   1673 
   1674 /*
   1675  * Device driver interface functions and commonly used functions.
   1676  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1677  */
   1678 
   1679 /* Lookup supported device table */
   1680 static const struct wm_product *
   1681 wm_lookup(const struct pci_attach_args *pa)
   1682 {
   1683 	const struct wm_product *wmp;
   1684 
   1685 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1686 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1687 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1688 			return wmp;
   1689 	}
   1690 	return NULL;
   1691 }
   1692 
   1693 /* The match function (ca_match) */
   1694 static int
   1695 wm_match(device_t parent, cfdata_t cf, void *aux)
   1696 {
   1697 	struct pci_attach_args *pa = aux;
   1698 
   1699 	if (wm_lookup(pa) != NULL)
   1700 		return 1;
   1701 
   1702 	return 0;
   1703 }
   1704 
   1705 /* The attach function (ca_attach) */
   1706 static void
   1707 wm_attach(device_t parent, device_t self, void *aux)
   1708 {
   1709 	struct wm_softc *sc = device_private(self);
   1710 	struct pci_attach_args *pa = aux;
   1711 	prop_dictionary_t dict;
   1712 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1713 	pci_chipset_tag_t pc = pa->pa_pc;
   1714 	int counts[PCI_INTR_TYPE_SIZE];
   1715 	pci_intr_type_t max_type;
   1716 	const char *eetype, *xname;
   1717 	bus_space_tag_t memt;
   1718 	bus_space_handle_t memh;
   1719 	bus_size_t memsize;
   1720 	int memh_valid;
   1721 	int i, error;
   1722 	const struct wm_product *wmp;
   1723 	prop_data_t ea;
   1724 	prop_number_t pn;
   1725 	uint8_t enaddr[ETHER_ADDR_LEN];
   1726 	char buf[256];
   1727 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1728 	pcireg_t preg, memtype;
   1729 	uint16_t eeprom_data, apme_mask;
   1730 	bool force_clear_smbi;
   1731 	uint32_t link_mode;
   1732 	uint32_t reg;
   1733 
   1734 	sc->sc_dev = self;
   1735 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1736 	sc->sc_core_stopping = false;
   1737 
   1738 	wmp = wm_lookup(pa);
   1739 #ifdef DIAGNOSTIC
   1740 	if (wmp == NULL) {
   1741 		printf("\n");
   1742 		panic("wm_attach: impossible");
   1743 	}
   1744 #endif
   1745 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1746 
   1747 	sc->sc_pc = pa->pa_pc;
   1748 	sc->sc_pcitag = pa->pa_tag;
   1749 
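	/* Use the 64-bit DMA tag when 64-bit DMA is available. */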
   1750 	if (pci_dma64_available(pa))
   1751 		sc->sc_dmat = pa->pa_dmat64;
   1752 	else
   1753 		sc->sc_dmat = pa->pa_dmat;
   1754 
   1755 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1756 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1757 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1758 
   1759 	sc->sc_type = wmp->wmp_type;
   1760 
   1761 	/* Set default function pointers */
   1762 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1763 	sc->phy.release = sc->nvm.release = wm_put_null;
   1764 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1765 
   1766 	if (sc->sc_type < WM_T_82543) {
   1767 		if (sc->sc_rev < 2) {
   1768 			aprint_error_dev(sc->sc_dev,
   1769 			    "i82542 must be at least rev. 2\n");
   1770 			return;
   1771 		}
   1772 		if (sc->sc_rev < 3)
   1773 			sc->sc_type = WM_T_82542_2_0;
   1774 	}
   1775 
   1776 	/*
   1777 	 * Disable MSI for Errata:
   1778 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1779 	 *
   1780 	 *  82544: Errata 25
   1781 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1782 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1783 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1784 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1785 	 *
   1786 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1787 	 *
   1788 	 *  82571 & 82572: Errata 63
   1789 	 */
   1790 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1791 	    || (sc->sc_type == WM_T_82572))
   1792 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1793 
   1794 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1795 	    || (sc->sc_type == WM_T_82580)
   1796 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1797 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1798 		sc->sc_flags |= WM_F_NEWQUEUE;
   1799 
   1800 	/* Set device properties (mactype) */
   1801 	dict = device_properties(sc->sc_dev);
   1802 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1803 
   1804 	/*
	 * Map the device.  All devices support memory-mapped access,
   1806 	 * and it is really required for normal operation.
   1807 	 */
   1808 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1809 	switch (memtype) {
   1810 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1811 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1812 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1813 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1814 		break;
   1815 	default:
   1816 		memh_valid = 0;
   1817 		break;
   1818 	}
   1819 
   1820 	if (memh_valid) {
   1821 		sc->sc_st = memt;
   1822 		sc->sc_sh = memh;
   1823 		sc->sc_ss = memsize;
   1824 	} else {
   1825 		aprint_error_dev(sc->sc_dev,
   1826 		    "unable to map device registers\n");
   1827 		return;
   1828 	}
   1829 
   1830 	/*
   1831 	 * In addition, i82544 and later support I/O mapped indirect
   1832 	 * register access.  It is not desirable (nor supported in
   1833 	 * this driver) to use it for normal operation, though it is
   1834 	 * required to work around bugs in some chip versions.
   1835 	 */
   1836 	if (sc->sc_type >= WM_T_82544) {
   1837 		/* First we have to find the I/O BAR. */
   1838 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1839 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1840 			if (memtype == PCI_MAPREG_TYPE_IO)
   1841 				break;
   1842 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1843 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1844 				i += 4;	/* skip high bits, too */
   1845 		}
   1846 		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that is not a problem, because newer chips don't
			 * have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it hasn't been
			 * configured.
			 */
   1857 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1858 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1859 				aprint_error_dev(sc->sc_dev,
   1860 				    "WARNING: I/O BAR at zero.\n");
   1861 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1862 					0, &sc->sc_iot, &sc->sc_ioh,
   1863 					NULL, &sc->sc_ios) == 0) {
   1864 				sc->sc_flags |= WM_F_IOH_VALID;
   1865 			} else {
   1866 				aprint_error_dev(sc->sc_dev,
   1867 				    "WARNING: unable to map I/O space\n");
   1868 			}
   1869 		}
   1871 	}
   1872 
   1873 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1874 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1875 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1876 	if (sc->sc_type < WM_T_82542_2_1)
   1877 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1878 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1879 
   1880 	/* power up chip */
   1881 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1882 	    NULL)) && error != EOPNOTSUPP) {
   1883 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1884 		return;
   1885 	}
   1886 
   1887 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
	/*
	 * If we can use only one queue, don't use MSI-X, to save
	 * interrupt resources.
	 */
   1892 	if (sc->sc_nqueues > 1) {
   1893 		max_type = PCI_INTR_TYPE_MSIX;
		/*
		 * The 82583 has an MSI-X capability in its PCI configuration
		 * space, but it doesn't actually support MSI-X.  At least
		 * the documentation doesn't say anything about MSI-X.
		 */
   1899 		counts[PCI_INTR_TYPE_MSIX]
   1900 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1901 	} else {
   1902 		max_type = PCI_INTR_TYPE_MSI;
   1903 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1904 	}
   1905 
   1906 	/* Allocation settings */
   1907 	counts[PCI_INTR_TYPE_MSI] = 1;
   1908 	counts[PCI_INTR_TYPE_INTX] = 1;
   1909 	/* overridden by disable flags */
   1910 	if (wm_disable_msi != 0) {
   1911 		counts[PCI_INTR_TYPE_MSI] = 0;
   1912 		if (wm_disable_msix != 0) {
   1913 			max_type = PCI_INTR_TYPE_INTX;
   1914 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1915 		}
   1916 	} else if (wm_disable_msix != 0) {
   1917 		max_type = PCI_INTR_TYPE_MSI;
   1918 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1919 	}
   1920 
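	/*
	 * Allocate the interrupt, falling back from MSI-X to MSI to INTx;
	 * a failed MSI-X or MSI setup releases its allocation and retries
	 * with the next type.
	 */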
   1921 alloc_retry:
   1922 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1923 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1924 		return;
   1925 	}
   1926 
   1927 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1928 		error = wm_setup_msix(sc);
   1929 		if (error) {
   1930 			pci_intr_release(pc, sc->sc_intrs,
   1931 			    counts[PCI_INTR_TYPE_MSIX]);
   1932 
   1933 			/* Setup for MSI: Disable MSI-X */
   1934 			max_type = PCI_INTR_TYPE_MSI;
   1935 			counts[PCI_INTR_TYPE_MSI] = 1;
   1936 			counts[PCI_INTR_TYPE_INTX] = 1;
   1937 			goto alloc_retry;
   1938 		}
	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1940 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1941 		error = wm_setup_legacy(sc);
   1942 		if (error) {
   1943 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1944 			    counts[PCI_INTR_TYPE_MSI]);
   1945 
   1946 			/* The next try is for INTx: Disable MSI */
   1947 			max_type = PCI_INTR_TYPE_INTX;
   1948 			counts[PCI_INTR_TYPE_INTX] = 1;
   1949 			goto alloc_retry;
   1950 		}
   1951 	} else {
   1952 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1953 		error = wm_setup_legacy(sc);
   1954 		if (error) {
   1955 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1956 			    counts[PCI_INTR_TYPE_INTX]);
   1957 			return;
   1958 		}
   1959 	}
   1960 
   1961 	/*
   1962 	 * Check the function ID (unit number of the chip).
   1963 	 */
   1964 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1965 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1966 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1967 	    || (sc->sc_type == WM_T_82580)
   1968 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1969 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1970 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1971 	else
   1972 		sc->sc_funcid = 0;
   1973 
   1974 	/*
   1975 	 * Determine a few things about the bus we're connected to.
   1976 	 */
   1977 	if (sc->sc_type < WM_T_82543) {
   1978 		/* We don't really know the bus characteristics here. */
   1979 		sc->sc_bus_speed = 33;
   1980 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
   1985 		sc->sc_flags |= WM_F_CSA;
   1986 		sc->sc_bus_speed = 66;
   1987 		aprint_verbose_dev(sc->sc_dev,
   1988 		    "Communication Streaming Architecture\n");
   1989 		if (sc->sc_type == WM_T_82547) {
   1990 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1991 			callout_setfunc(&sc->sc_txfifo_ch,
   1992 					wm_82547_txfifo_stall, sc);
   1993 			aprint_verbose_dev(sc->sc_dev,
   1994 			    "using 82547 Tx FIFO stall work-around\n");
   1995 		}
   1996 	} else if (sc->sc_type >= WM_T_82571) {
   1997 		sc->sc_flags |= WM_F_PCIE;
   1998 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1999 		    && (sc->sc_type != WM_T_ICH10)
   2000 		    && (sc->sc_type != WM_T_PCH)
   2001 		    && (sc->sc_type != WM_T_PCH2)
   2002 		    && (sc->sc_type != WM_T_PCH_LPT)
   2003 		    && (sc->sc_type != WM_T_PCH_SPT)
   2004 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2005 			/* ICH* and PCH* have no PCIe capability registers */
   2006 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2007 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2008 				NULL) == 0)
   2009 				aprint_error_dev(sc->sc_dev,
   2010 				    "unable to find PCIe capability\n");
   2011 		}
   2012 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2013 	} else {
   2014 		reg = CSR_READ(sc, WMREG_STATUS);
   2015 		if (reg & STATUS_BUS64)
   2016 			sc->sc_flags |= WM_F_BUS64;
   2017 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2018 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2019 
   2020 			sc->sc_flags |= WM_F_PCIX;
   2021 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2022 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2023 				aprint_error_dev(sc->sc_dev,
   2024 				    "unable to find PCIX capability\n");
   2025 			else if (sc->sc_type != WM_T_82545_3 &&
   2026 				 sc->sc_type != WM_T_82546_3) {
   2027 				/*
   2028 				 * Work around a problem caused by the BIOS
   2029 				 * setting the max memory read byte count
   2030 				 * incorrectly.
   2031 				 */
   2032 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2033 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2034 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2035 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2036 
   2037 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2038 				    PCIX_CMD_BYTECNT_SHIFT;
   2039 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2040 				    PCIX_STATUS_MAXB_SHIFT;
   2041 				if (bytecnt > maxb) {
   2042 					aprint_verbose_dev(sc->sc_dev,
   2043 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2044 					    512 << bytecnt, 512 << maxb);
   2045 					pcix_cmd = (pcix_cmd &
   2046 					    ~PCIX_CMD_BYTECNT_MASK) |
   2047 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2048 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2049 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2050 					    pcix_cmd);
   2051 				}
   2052 			}
   2053 		}
   2054 		/*
   2055 		 * The quad port adapter is special; it has a PCIX-PCIX
   2056 		 * bridge on the board, and can run the secondary bus at
   2057 		 * a higher speed.
   2058 		 */
   2059 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2060 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2061 								      : 66;
   2062 		} else if (sc->sc_flags & WM_F_PCIX) {
   2063 			switch (reg & STATUS_PCIXSPD_MASK) {
   2064 			case STATUS_PCIXSPD_50_66:
   2065 				sc->sc_bus_speed = 66;
   2066 				break;
   2067 			case STATUS_PCIXSPD_66_100:
   2068 				sc->sc_bus_speed = 100;
   2069 				break;
   2070 			case STATUS_PCIXSPD_100_133:
   2071 				sc->sc_bus_speed = 133;
   2072 				break;
   2073 			default:
   2074 				aprint_error_dev(sc->sc_dev,
   2075 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2076 				    reg & STATUS_PCIXSPD_MASK);
   2077 				sc->sc_bus_speed = 66;
   2078 				break;
   2079 			}
   2080 		} else
   2081 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2082 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2083 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2084 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2085 	}
   2086 
	/* Disable ASPM L0s and/or L1 as a workaround */
   2088 	wm_disable_aspm(sc);
   2089 
   2090 	/* clear interesting stat counters */
   2091 	CSR_READ(sc, WMREG_COLC);
   2092 	CSR_READ(sc, WMREG_RXERRC);
   2093 
   2094 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2095 	    || (sc->sc_type >= WM_T_ICH8))
   2096 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2097 	if (sc->sc_type >= WM_T_ICH8)
   2098 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2099 
   2100 	/* Set PHY, NVM mutex related stuff */
   2101 	switch (sc->sc_type) {
   2102 	case WM_T_82542_2_0:
   2103 	case WM_T_82542_2_1:
   2104 	case WM_T_82543:
   2105 	case WM_T_82544:
   2106 		/* Microwire */
   2107 		sc->nvm.read = wm_nvm_read_uwire;
   2108 		sc->sc_nvm_wordsize = 64;
   2109 		sc->sc_nvm_addrbits = 6;
   2110 		break;
   2111 	case WM_T_82540:
   2112 	case WM_T_82545:
   2113 	case WM_T_82545_3:
   2114 	case WM_T_82546:
   2115 	case WM_T_82546_3:
   2116 		/* Microwire */
   2117 		sc->nvm.read = wm_nvm_read_uwire;
   2118 		reg = CSR_READ(sc, WMREG_EECD);
   2119 		if (reg & EECD_EE_SIZE) {
   2120 			sc->sc_nvm_wordsize = 256;
   2121 			sc->sc_nvm_addrbits = 8;
   2122 		} else {
   2123 			sc->sc_nvm_wordsize = 64;
   2124 			sc->sc_nvm_addrbits = 6;
   2125 		}
   2126 		sc->sc_flags |= WM_F_LOCK_EECD;
   2127 		sc->nvm.acquire = wm_get_eecd;
   2128 		sc->nvm.release = wm_put_eecd;
   2129 		break;
   2130 	case WM_T_82541:
   2131 	case WM_T_82541_2:
   2132 	case WM_T_82547:
   2133 	case WM_T_82547_2:
   2134 		reg = CSR_READ(sc, WMREG_EECD);
		/*
		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
		 * the 8254[17], so set the flags and functions before
		 * calling it.
		 */
   2139 		sc->sc_flags |= WM_F_LOCK_EECD;
   2140 		sc->nvm.acquire = wm_get_eecd;
   2141 		sc->nvm.release = wm_put_eecd;
   2142 		if (reg & EECD_EE_TYPE) {
   2143 			/* SPI */
   2144 			sc->nvm.read = wm_nvm_read_spi;
   2145 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2146 			wm_nvm_set_addrbits_size_eecd(sc);
   2147 		} else {
   2148 			/* Microwire */
   2149 			sc->nvm.read = wm_nvm_read_uwire;
   2150 			if ((reg & EECD_EE_ABITS) != 0) {
   2151 				sc->sc_nvm_wordsize = 256;
   2152 				sc->sc_nvm_addrbits = 8;
   2153 			} else {
   2154 				sc->sc_nvm_wordsize = 64;
   2155 				sc->sc_nvm_addrbits = 6;
   2156 			}
   2157 		}
   2158 		break;
   2159 	case WM_T_82571:
   2160 	case WM_T_82572:
   2161 		/* SPI */
   2162 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2164 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2165 		wm_nvm_set_addrbits_size_eecd(sc);
   2166 		sc->phy.acquire = wm_get_swsm_semaphore;
   2167 		sc->phy.release = wm_put_swsm_semaphore;
   2168 		sc->nvm.acquire = wm_get_nvm_82571;
   2169 		sc->nvm.release = wm_put_nvm_82571;
   2170 		break;
   2171 	case WM_T_82573:
   2172 	case WM_T_82574:
   2173 	case WM_T_82583:
   2174 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2176 		if (sc->sc_type == WM_T_82573) {
   2177 			sc->phy.acquire = wm_get_swsm_semaphore;
   2178 			sc->phy.release = wm_put_swsm_semaphore;
   2179 			sc->nvm.acquire = wm_get_nvm_82571;
   2180 			sc->nvm.release = wm_put_nvm_82571;
   2181 		} else {
   2182 			/* Both PHY and NVM use the same semaphore. */
   2183 			sc->phy.acquire = sc->nvm.acquire
   2184 			    = wm_get_swfwhw_semaphore;
   2185 			sc->phy.release = sc->nvm.release
   2186 			    = wm_put_swfwhw_semaphore;
   2187 		}
   2188 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2189 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2190 			sc->sc_nvm_wordsize = 2048;
   2191 		} else {
   2192 			/* SPI */
   2193 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2194 			wm_nvm_set_addrbits_size_eecd(sc);
   2195 		}
   2196 		break;
   2197 	case WM_T_82575:
   2198 	case WM_T_82576:
   2199 	case WM_T_82580:
   2200 	case WM_T_I350:
   2201 	case WM_T_I354:
   2202 	case WM_T_80003:
   2203 		/* SPI */
   2204 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2205 		wm_nvm_set_addrbits_size_eecd(sc);
		if ((sc->sc_type == WM_T_80003)
   2207 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2208 			sc->nvm.read = wm_nvm_read_eerd;
   2209 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2210 		} else {
   2211 			sc->nvm.read = wm_nvm_read_spi;
   2212 			sc->sc_flags |= WM_F_LOCK_EECD;
   2213 		}
   2214 		sc->phy.acquire = wm_get_phy_82575;
   2215 		sc->phy.release = wm_put_phy_82575;
   2216 		sc->nvm.acquire = wm_get_nvm_80003;
   2217 		sc->nvm.release = wm_put_nvm_80003;
   2218 		break;
   2219 	case WM_T_ICH8:
   2220 	case WM_T_ICH9:
   2221 	case WM_T_ICH10:
   2222 	case WM_T_PCH:
   2223 	case WM_T_PCH2:
   2224 	case WM_T_PCH_LPT:
   2225 		sc->nvm.read = wm_nvm_read_ich8;
   2226 		/* FLASH */
   2227 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2228 		sc->sc_nvm_wordsize = 2048;
   2229 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2230 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2231 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2232 			aprint_error_dev(sc->sc_dev,
   2233 			    "can't map FLASH registers\n");
   2234 			goto out;
   2235 		}
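		/*
		 * GFPREG holds the NVM region's first and last sectors.
		 * Convert the sector count to bytes, then to 16-bit words
		 * per bank, assuming the region is split into two banks.
		 */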
   2236 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2237 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2238 		    ICH_FLASH_SECTOR_SIZE;
   2239 		sc->sc_ich8_flash_bank_size =
   2240 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2241 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2242 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2243 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2244 		sc->sc_flashreg_offset = 0;
   2245 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2246 		sc->phy.release = wm_put_swflag_ich8lan;
   2247 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2248 		sc->nvm.release = wm_put_nvm_ich8lan;
   2249 		break;
   2250 	case WM_T_PCH_SPT:
   2251 	case WM_T_PCH_CNP:
   2252 		sc->nvm.read = wm_nvm_read_spt;
   2253 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2254 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2255 		sc->sc_flasht = sc->sc_st;
   2256 		sc->sc_flashh = sc->sc_sh;
   2257 		sc->sc_ich8_flash_base = 0;
   2258 		sc->sc_nvm_wordsize =
   2259 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2260 			* NVM_SIZE_MULTIPLIER;
		/* It is the size in bytes; we want it in words */
   2262 		sc->sc_nvm_wordsize /= 2;
   2263 		/* assume 2 banks */
   2264 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2265 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2266 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2267 		sc->phy.release = wm_put_swflag_ich8lan;
   2268 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2269 		sc->nvm.release = wm_put_nvm_ich8lan;
   2270 		break;
   2271 	case WM_T_I210:
   2272 	case WM_T_I211:
		/* Allow a single clear of the SW semaphore on I210 and newer */
   2274 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2275 		if (wm_nvm_flash_presence_i210(sc)) {
   2276 			sc->nvm.read = wm_nvm_read_eerd;
   2277 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2278 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2279 			wm_nvm_set_addrbits_size_eecd(sc);
   2280 		} else {
   2281 			sc->nvm.read = wm_nvm_read_invm;
   2282 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2283 			sc->sc_nvm_wordsize = INVM_SIZE;
   2284 		}
   2285 		sc->phy.acquire = wm_get_phy_82575;
   2286 		sc->phy.release = wm_put_phy_82575;
   2287 		sc->nvm.acquire = wm_get_nvm_80003;
   2288 		sc->nvm.release = wm_put_nvm_80003;
   2289 		break;
   2290 	default:
   2291 		break;
   2292 	}
   2293 
   2294 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2295 	switch (sc->sc_type) {
   2296 	case WM_T_82571:
   2297 	case WM_T_82572:
   2298 		reg = CSR_READ(sc, WMREG_SWSM2);
   2299 		if ((reg & SWSM2_LOCK) == 0) {
   2300 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2301 			force_clear_smbi = true;
   2302 		} else
   2303 			force_clear_smbi = false;
   2304 		break;
   2305 	case WM_T_82573:
   2306 	case WM_T_82574:
   2307 	case WM_T_82583:
   2308 		force_clear_smbi = true;
   2309 		break;
   2310 	default:
   2311 		force_clear_smbi = false;
   2312 		break;
   2313 	}
   2314 	if (force_clear_smbi) {
   2315 		reg = CSR_READ(sc, WMREG_SWSM);
   2316 		if ((reg & SWSM_SMBI) != 0)
   2317 			aprint_error_dev(sc->sc_dev,
   2318 			    "Please update the Bootagent\n");
   2319 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2320 	}
   2321 
   2322 	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
   2324 	 * This allows the EEPROM type to be printed correctly in the case
   2325 	 * that no EEPROM is attached.
   2326 	 */
   2327 	/*
   2328 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2329 	 * this for later, so we can fail future reads from the EEPROM.
   2330 	 */
   2331 	if (wm_nvm_validate_checksum(sc)) {
		/*
		 * Validate again, because some PCI-e parts fail the
		 * first check due to the link being in sleep state.
		 */
   2336 		if (wm_nvm_validate_checksum(sc))
   2337 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2338 	}
   2339 
   2340 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2341 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2342 	else {
   2343 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2344 		    sc->sc_nvm_wordsize);
   2345 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2346 			aprint_verbose("iNVM");
   2347 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2348 			aprint_verbose("FLASH(HW)");
   2349 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2350 			aprint_verbose("FLASH");
   2351 		else {
   2352 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2353 				eetype = "SPI";
   2354 			else
   2355 				eetype = "MicroWire";
   2356 			aprint_verbose("(%d address bits) %s EEPROM",
   2357 			    sc->sc_nvm_addrbits, eetype);
   2358 		}
   2359 	}
   2360 	wm_nvm_version(sc);
   2361 	aprint_verbose("\n");
   2362 
	/*
	 * XXX This is the first call of wm_gmii_setup_phytype.  The result
	 * might be incorrect.
	 */
   2367 	wm_gmii_setup_phytype(sc, 0, 0);
   2368 
   2369 	/* Reset the chip to a known state. */
   2370 	wm_reset(sc);
   2371 
   2372 	/*
   2373 	 * Check for I21[01] PLL workaround.
   2374 	 *
   2375 	 * Three cases:
   2376 	 * a) Chip is I211.
   2377 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2378 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2379 	 */
   2380 	if (sc->sc_type == WM_T_I211)
   2381 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2382 	if (sc->sc_type == WM_T_I210) {
   2383 		if (!wm_nvm_flash_presence_i210(sc))
   2384 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2385 		else if ((sc->sc_nvm_ver_major < 3)
   2386 		    || ((sc->sc_nvm_ver_major == 3)
   2387 			&& (sc->sc_nvm_ver_minor < 25))) {
   2388 			aprint_verbose_dev(sc->sc_dev,
   2389 			    "ROM image version %d.%d is older than 3.25\n",
   2390 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2391 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2392 		}
   2393 	}
   2394 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2395 		wm_pll_workaround_i210(sc);
   2396 
   2397 	wm_get_wakeup(sc);
   2398 
   2399 	/* Non-AMT based hardware can now take control from firmware */
   2400 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2401 		wm_get_hw_control(sc);
   2402 
	/*
	 * Read the Ethernet address from the device properties if present;
	 * otherwise read it from the EEPROM.
	 */
   2407 	ea = prop_dictionary_get(dict, "mac-address");
   2408 	if (ea != NULL) {
   2409 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2410 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2411 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2412 	} else {
   2413 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2414 			aprint_error_dev(sc->sc_dev,
   2415 			    "unable to read Ethernet address\n");
   2416 			goto out;
   2417 		}
   2418 	}
   2419 
   2420 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2421 	    ether_sprintf(enaddr));
   2422 
   2423 	/*
   2424 	 * Read the config info from the EEPROM, and set up various
   2425 	 * bits in the control registers based on their contents.
   2426 	 */
   2427 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2428 	if (pn != NULL) {
   2429 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2430 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2431 	} else {
   2432 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2433 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2434 			goto out;
   2435 		}
   2436 	}
   2437 
   2438 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2439 	if (pn != NULL) {
   2440 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2441 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2442 	} else {
   2443 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2444 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2445 			goto out;
   2446 		}
   2447 	}
   2448 
   2449 	/* check for WM_F_WOL */
   2450 	switch (sc->sc_type) {
   2451 	case WM_T_82542_2_0:
   2452 	case WM_T_82542_2_1:
   2453 	case WM_T_82543:
   2454 		/* dummy? */
   2455 		eeprom_data = 0;
   2456 		apme_mask = NVM_CFG3_APME;
   2457 		break;
   2458 	case WM_T_82544:
   2459 		apme_mask = NVM_CFG2_82544_APM_EN;
   2460 		eeprom_data = cfg2;
   2461 		break;
   2462 	case WM_T_82546:
   2463 	case WM_T_82546_3:
   2464 	case WM_T_82571:
   2465 	case WM_T_82572:
   2466 	case WM_T_82573:
   2467 	case WM_T_82574:
   2468 	case WM_T_82583:
   2469 	case WM_T_80003:
   2470 	default:
   2471 		apme_mask = NVM_CFG3_APME;
   2472 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2473 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2474 		break;
   2475 	case WM_T_82575:
   2476 	case WM_T_82576:
   2477 	case WM_T_82580:
   2478 	case WM_T_I350:
   2479 	case WM_T_I354: /* XXX ok? */
   2480 	case WM_T_ICH8:
   2481 	case WM_T_ICH9:
   2482 	case WM_T_ICH10:
   2483 	case WM_T_PCH:
   2484 	case WM_T_PCH2:
   2485 	case WM_T_PCH_LPT:
   2486 	case WM_T_PCH_SPT:
   2487 	case WM_T_PCH_CNP:
   2488 		/* XXX The funcid should be checked on some devices */
   2489 		apme_mask = WUC_APME;
   2490 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2491 		break;
   2492 	}
   2493 
	/* Set the WM_F_WOL flag based on the EEPROM/WUC data read above */
   2495 	if ((eeprom_data & apme_mask) != 0)
   2496 		sc->sc_flags |= WM_F_WOL;
   2497 
   2498 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2499 		/* Check NVM for autonegotiation */
   2500 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2501 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2502 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2503 		}
   2504 	}
   2505 
	/*
	 * XXX Need special handling for some multiple-port cards
	 * to disable a particular port.
	 */
   2510 
   2511 	if (sc->sc_type >= WM_T_82544) {
   2512 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2513 		if (pn != NULL) {
   2514 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2515 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2516 		} else {
   2517 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2518 				aprint_error_dev(sc->sc_dev,
   2519 				    "unable to read SWDPIN\n");
   2520 				goto out;
   2521 			}
   2522 		}
   2523 	}
   2524 
   2525 	if (cfg1 & NVM_CFG1_ILOS)
   2526 		sc->sc_ctrl |= CTRL_ILOS;
   2527 
	/*
	 * XXX
	 * This code isn't correct because pins 2 and 3 are located
	 * at different positions on newer chips.  Check all datasheets.
	 *
	 * Until this problem is resolved, restrict it to chips up to
	 * the 82580.
	 */
   2535 	if (sc->sc_type <= WM_T_82580) {
   2536 		if (sc->sc_type >= WM_T_82544) {
   2537 			sc->sc_ctrl |=
   2538 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2539 			    CTRL_SWDPIO_SHIFT;
   2540 			sc->sc_ctrl |=
   2541 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2542 			    CTRL_SWDPINS_SHIFT;
   2543 		} else {
   2544 			sc->sc_ctrl |=
   2545 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2546 			    CTRL_SWDPIO_SHIFT;
   2547 		}
   2548 	}
   2549 
   2550 	/* XXX For other than 82580? */
   2551 	if (sc->sc_type == WM_T_82580) {
   2552 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2553 		if (nvmword & __BIT(13))
   2554 			sc->sc_ctrl |= CTRL_ILOS;
   2555 	}
   2556 
   2557 #if 0
   2558 	if (sc->sc_type >= WM_T_82544) {
   2559 		if (cfg1 & NVM_CFG1_IPS0)
   2560 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2561 		if (cfg1 & NVM_CFG1_IPS1)
   2562 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2563 		sc->sc_ctrl_ext |=
   2564 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2565 		    CTRL_EXT_SWDPIO_SHIFT;
   2566 		sc->sc_ctrl_ext |=
   2567 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2568 		    CTRL_EXT_SWDPINS_SHIFT;
   2569 	} else {
   2570 		sc->sc_ctrl_ext |=
   2571 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2572 		    CTRL_EXT_SWDPIO_SHIFT;
   2573 	}
   2574 #endif
   2575 
   2576 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2577 #if 0
   2578 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2579 #endif
   2580 
   2581 	if (sc->sc_type == WM_T_PCH) {
   2582 		uint16_t val;
   2583 
   2584 		/* Save the NVM K1 bit setting */
   2585 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2586 
   2587 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2588 			sc->sc_nvm_k1_enabled = 1;
   2589 		else
   2590 			sc->sc_nvm_k1_enabled = 0;
   2591 	}
   2592 
	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2594 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2595 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2596 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2597 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2598 	    || sc->sc_type == WM_T_82573
   2599 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2600 		/* Copper only */
   2601 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2605 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2606 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2607 		switch (link_mode) {
   2608 		case CTRL_EXT_LINK_MODE_1000KX:
   2609 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2610 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2611 			break;
   2612 		case CTRL_EXT_LINK_MODE_SGMII:
   2613 			if (wm_sgmii_uses_mdio(sc)) {
   2614 				aprint_verbose_dev(sc->sc_dev,
   2615 				    "SGMII(MDIO)\n");
   2616 				sc->sc_flags |= WM_F_SGMII;
   2617 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2618 				break;
   2619 			}
   2620 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2621 			/*FALLTHROUGH*/
   2622 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2623 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2624 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2625 				if (link_mode
   2626 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2627 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2628 					sc->sc_flags |= WM_F_SGMII;
   2629 				} else {
   2630 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2631 					aprint_verbose_dev(sc->sc_dev,
   2632 					    "SERDES\n");
   2633 				}
   2634 				break;
   2635 			}
   2636 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2637 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2638 
   2639 			/* Change current link mode setting */
   2640 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2641 			switch (sc->sc_mediatype) {
   2642 			case WM_MEDIATYPE_COPPER:
   2643 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2644 				break;
   2645 			case WM_MEDIATYPE_SERDES:
   2646 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2647 				break;
   2648 			default:
   2649 				break;
   2650 			}
   2651 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2652 			break;
   2653 		case CTRL_EXT_LINK_MODE_GMII:
   2654 		default:
   2655 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2656 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2657 			break;
   2658 		}
   2659 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
   2665 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2666 	} else if (sc->sc_type < WM_T_82543 ||
   2667 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2668 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2669 			aprint_error_dev(sc->sc_dev,
   2670 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2671 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2672 		}
   2673 	} else {
   2674 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2675 			aprint_error_dev(sc->sc_dev,
   2676 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2677 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2678 		}
   2679 	}
   2680 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2681 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2682 
   2683 	/* Set device properties (macflags) */
   2684 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2685 
   2686 	/* Initialize the media structures accordingly. */
   2687 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2688 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2689 	else
   2690 		wm_tbi_mediainit(sc); /* All others */
   2691 
   2692 	ifp = &sc->sc_ethercom.ec_if;
   2693 	xname = device_xname(sc->sc_dev);
   2694 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2695 	ifp->if_softc = sc;
   2696 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2697 #ifdef WM_MPSAFE
   2698 	ifp->if_extflags = IFEF_MPSAFE;
   2699 #endif
   2700 	ifp->if_ioctl = wm_ioctl;
   2701 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2702 		ifp->if_start = wm_nq_start;
		/*
		 * When the number of CPUs is one and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
		 * other for link status changes.
		 * In this situation, wm_nq_transmit() is disadvantageous
		 * because of the wm_select_txqueue() and pcq(9) overhead.
		 */
   2711 		if (wm_is_using_multiqueue(sc))
   2712 			ifp->if_transmit = wm_nq_transmit;
   2713 	} else {
   2714 		ifp->if_start = wm_start;
		/*
		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
		 */
   2718 		if (wm_is_using_multiqueue(sc))
   2719 			ifp->if_transmit = wm_transmit;
   2720 	}
	/* wm(4) uses wm_tick as its watchdog instead of ifp->if_watchdog. */
   2722 	ifp->if_init = wm_init;
   2723 	ifp->if_stop = wm_stop;
   2724 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2725 	IFQ_SET_READY(&ifp->if_snd);
   2726 
   2727 	/* Check for jumbo frame */
   2728 	switch (sc->sc_type) {
   2729 	case WM_T_82573:
   2730 		/* XXX limited to 9234 if ASPM is disabled */
   2731 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2732 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2733 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2734 		break;
   2735 	case WM_T_82571:
   2736 	case WM_T_82572:
   2737 	case WM_T_82574:
   2738 	case WM_T_82583:
   2739 	case WM_T_82575:
   2740 	case WM_T_82576:
   2741 	case WM_T_82580:
   2742 	case WM_T_I350:
   2743 	case WM_T_I354:
   2744 	case WM_T_I210:
   2745 	case WM_T_I211:
   2746 	case WM_T_80003:
   2747 	case WM_T_ICH9:
   2748 	case WM_T_ICH10:
   2749 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2750 	case WM_T_PCH_LPT:
   2751 	case WM_T_PCH_SPT:
   2752 	case WM_T_PCH_CNP:
   2753 		/* XXX limited to 9234 */
   2754 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2755 		break;
   2756 	case WM_T_PCH:
   2757 		/* XXX limited to 4096 */
   2758 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2759 		break;
   2760 	case WM_T_82542_2_0:
   2761 	case WM_T_82542_2_1:
   2762 	case WM_T_ICH8:
   2763 		/* No support for jumbo frame */
   2764 		break;
   2765 	default:
   2766 		/* ETHER_MAX_LEN_JUMBO */
   2767 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2768 		break;
   2769 	}
   2770 
    2771 	/* If we're an i82543 or greater, we can support VLANs. */
   2772 	if (sc->sc_type >= WM_T_82543)
   2773 		sc->sc_ethercom.ec_capabilities |=
   2774 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2775 
    2776 	/*
    2777 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
    2778 	 * on i82543 and later.
    2779 	 */
   2780 	if (sc->sc_type >= WM_T_82543) {
   2781 		ifp->if_capabilities |=
   2782 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2783 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2784 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2785 		    IFCAP_CSUM_TCPv6_Tx |
   2786 		    IFCAP_CSUM_UDPv6_Tx;
   2787 	}
   2788 
   2789 	/*
    2790 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2791 	 *
   2792 	 *	82541GI (8086:1076) ... no
   2793 	 *	82572EI (8086:10b9) ... yes
   2794 	 */
   2795 	if (sc->sc_type >= WM_T_82571) {
   2796 		ifp->if_capabilities |=
   2797 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2798 	}
   2799 
    2800 	/*
    2801 	 * If we're an i82544 or greater (except i82547), we can do
    2802 	 * TCP segmentation offload.
    2803 	 */
   2804 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2805 		ifp->if_capabilities |= IFCAP_TSOv4;
   2806 	}
   2807 
   2808 	if (sc->sc_type >= WM_T_82571) {
   2809 		ifp->if_capabilities |= IFCAP_TSOv6;
   2810 	}
   2811 
   2812 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2813 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2814 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2815 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2816 
   2817 #ifdef WM_MPSAFE
   2818 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2819 #else
   2820 	sc->sc_core_lock = NULL;
   2821 #endif
   2822 
   2823 	/* Attach the interface. */
   2824 	error = if_initialize(ifp);
   2825 	if (error != 0) {
   2826 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2827 		    error);
   2828 		return; /* Error */
   2829 	}
   2830 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2831 	ether_ifattach(ifp, enaddr);
   2832 	if_register(ifp);
   2833 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2834 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2835 			  RND_FLAG_DEFAULT);
   2836 
   2837 #ifdef WM_EVENT_COUNTERS
   2838 	/* Attach event counters. */
   2839 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2840 	    NULL, xname, "linkintr");
   2841 
   2842 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2843 	    NULL, xname, "tx_xoff");
   2844 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2845 	    NULL, xname, "tx_xon");
   2846 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2847 	    NULL, xname, "rx_xoff");
   2848 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2849 	    NULL, xname, "rx_xon");
   2850 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2851 	    NULL, xname, "rx_macctl");
   2852 #endif /* WM_EVENT_COUNTERS */
   2853 
   2854 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2855 		pmf_class_network_register(self, ifp);
   2856 	else
   2857 		aprint_error_dev(self, "couldn't establish power handler\n");
   2858 
   2859 	sc->sc_flags |= WM_F_ATTACHED;
   2860  out:
   2861 	return;
   2862 }
   2863 
   2864 /* The detach function (ca_detach) */
   2865 static int
   2866 wm_detach(device_t self, int flags __unused)
   2867 {
   2868 	struct wm_softc *sc = device_private(self);
   2869 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2870 	int i;
   2871 
   2872 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2873 		return 0;
   2874 
   2875 	/* Stop the interface. Callouts are stopped in it. */
   2876 	wm_stop(ifp, 1);
   2877 
   2878 	pmf_device_deregister(self);
   2879 
   2880 #ifdef WM_EVENT_COUNTERS
   2881 	evcnt_detach(&sc->sc_ev_linkintr);
   2882 
   2883 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2884 	evcnt_detach(&sc->sc_ev_tx_xon);
   2885 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2886 	evcnt_detach(&sc->sc_ev_rx_xon);
   2887 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2888 #endif /* WM_EVENT_COUNTERS */
   2889 
   2890 	/* Tell the firmware about the release */
   2891 	WM_CORE_LOCK(sc);
   2892 	wm_release_manageability(sc);
   2893 	wm_release_hw_control(sc);
   2894 	wm_enable_wakeup(sc);
   2895 	WM_CORE_UNLOCK(sc);
   2896 
   2897 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2898 
   2899 	/* Delete all remaining media. */
   2900 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2901 
   2902 	ether_ifdetach(ifp);
   2903 	if_detach(ifp);
   2904 	if_percpuq_destroy(sc->sc_ipq);
   2905 
   2906 	/* Unload RX dmamaps and free mbufs */
   2907 	for (i = 0; i < sc->sc_nqueues; i++) {
   2908 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2909 		mutex_enter(rxq->rxq_lock);
   2910 		wm_rxdrain(rxq);
   2911 		mutex_exit(rxq->rxq_lock);
   2912 	}
   2913 	/* Must unlock here */
   2914 
   2915 	/* Disestablish the interrupt handler */
   2916 	for (i = 0; i < sc->sc_nintrs; i++) {
   2917 		if (sc->sc_ihs[i] != NULL) {
   2918 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2919 			sc->sc_ihs[i] = NULL;
   2920 		}
   2921 	}
   2922 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2923 
   2924 	wm_free_txrx_queues(sc);
   2925 
   2926 	/* Unmap the registers */
   2927 	if (sc->sc_ss) {
   2928 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2929 		sc->sc_ss = 0;
   2930 	}
   2931 	if (sc->sc_ios) {
   2932 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2933 		sc->sc_ios = 0;
   2934 	}
   2935 	if (sc->sc_flashs) {
   2936 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2937 		sc->sc_flashs = 0;
   2938 	}
   2939 
   2940 	if (sc->sc_core_lock)
   2941 		mutex_obj_free(sc->sc_core_lock);
   2942 	if (sc->sc_ich_phymtx)
   2943 		mutex_obj_free(sc->sc_ich_phymtx);
   2944 	if (sc->sc_ich_nvmmtx)
   2945 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2946 
   2947 	return 0;
   2948 }
   2949 
   2950 static bool
   2951 wm_suspend(device_t self, const pmf_qual_t *qual)
   2952 {
   2953 	struct wm_softc *sc = device_private(self);
   2954 
   2955 	wm_release_manageability(sc);
   2956 	wm_release_hw_control(sc);
   2957 	wm_enable_wakeup(sc);
   2958 
   2959 	return true;
   2960 }
   2961 
   2962 static bool
   2963 wm_resume(device_t self, const pmf_qual_t *qual)
   2964 {
   2965 	struct wm_softc *sc = device_private(self);
   2966 
   2967 	/* Disable ASPM L0s and/or L1 for workaround */
   2968 	wm_disable_aspm(sc);
   2969 	wm_init_manageability(sc);
   2970 
   2971 	return true;
   2972 }
   2973 
   2974 /*
   2975  * wm_watchdog:		[ifnet interface function]
   2976  *
   2977  *	Watchdog timer handler.
   2978  */
   2979 static void
   2980 wm_watchdog(struct ifnet *ifp)
   2981 {
   2982 	int qid;
   2983 	struct wm_softc *sc = ifp->if_softc;
    2984 	uint16_t hang_queue = 0; /* Max number of queues in wm(4) is 16 (82576). */
   2985 
   2986 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2987 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2988 
   2989 		wm_watchdog_txq(ifp, txq, &hang_queue);
   2990 	}
   2991 
    2992 	/*
    2993 	 * If any of the queues hung up, reset the interface.
    2994 	 */
   2995 	if (hang_queue != 0) {
   2996 		(void) wm_init(ifp);
   2997 
    2998 		/*
    2999 		 * There is still some upper layer processing which calls
    3000 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
    3001 		 */
   3002 		/* Try to get more packets going. */
   3003 		ifp->if_start(ifp);
   3004 	}
   3005 }
   3006 
   3007 
   3008 static void
   3009 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3010 {
   3011 
   3012 	mutex_enter(txq->txq_lock);
   3013 	if (txq->txq_sending &&
   3014 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3015 		wm_watchdog_txq_locked(ifp, txq, hang);
   3016 	}
   3017 	mutex_exit(txq->txq_lock);
   3018 }
   3019 
   3020 static void
   3021 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3022     uint16_t *hang)
   3023 {
   3024 	struct wm_softc *sc = ifp->if_softc;
   3025 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3026 
   3027 	KASSERT(mutex_owned(txq->txq_lock));
   3028 
   3029 	/*
   3030 	 * Since we're using delayed interrupts, sweep up
   3031 	 * before we report an error.
   3032 	 */
   3033 	wm_txeof(txq, UINT_MAX);
   3034 
   3035 	if (txq->txq_sending)
   3036 		*hang |= __BIT(wmq->wmq_id);
   3037 
   3038 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3039 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3040 		    device_xname(sc->sc_dev));
   3041 	} else {
   3042 #ifdef WM_DEBUG
   3043 		int i, j;
   3044 		struct wm_txsoft *txs;
   3045 #endif
   3046 		log(LOG_ERR,
   3047 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3048 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3049 		    txq->txq_next);
   3050 		ifp->if_oerrors++;
   3051 #ifdef WM_DEBUG
   3052 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   3053 		    i = WM_NEXTTXS(txq, i)) {
   3054 		    txs = &txq->txq_soft[i];
   3055 		    printf("txs %d tx %d -> %d\n",
   3056 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3057 		    for (j = txs->txs_firstdesc; ;
   3058 			j = WM_NEXTTX(txq, j)) {
   3059 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3060 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3061 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3062 				    printf("\t %#08x%08x\n",
   3063 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3064 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3065 			    } else {
   3066 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3067 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3068 					txq->txq_descs[j].wtx_addr.wa_low);
   3069 				    printf("\t %#04x%02x%02x%08x\n",
   3070 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3071 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3072 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3073 					txq->txq_descs[j].wtx_cmdlen);
   3074 			    }
   3075 			if (j == txs->txs_lastdesc)
   3076 				break;
   3077 			}
   3078 		}
   3079 #endif
   3080 	}
   3081 }
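/*
 * Example (illustrative, not from the original source): if queues 0 and
 * 2 are still sending after the sweep above, *hang ends up as 0x5 and
 * wm_watchdog() resets the interface once for all hung queues.
 */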
   3082 
   3083 /*
   3084  * wm_tick:
   3085  *
   3086  *	One second timer, used to check link status, sweep up
   3087  *	completed transmit jobs, etc.
   3088  */
   3089 static void
   3090 wm_tick(void *arg)
   3091 {
   3092 	struct wm_softc *sc = arg;
   3093 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3094 #ifndef WM_MPSAFE
   3095 	int s = splnet();
   3096 #endif
   3097 
   3098 	WM_CORE_LOCK(sc);
   3099 
   3100 	if (sc->sc_core_stopping) {
   3101 		WM_CORE_UNLOCK(sc);
   3102 #ifndef WM_MPSAFE
   3103 		splx(s);
   3104 #endif
   3105 		return;
   3106 	}
   3107 
   3108 	if (sc->sc_type >= WM_T_82542_2_1) {
   3109 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3110 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3111 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3112 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3113 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3114 	}
   3115 
   3116 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3117 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3118 	    + CSR_READ(sc, WMREG_CRCERRS)
   3119 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3120 	    + CSR_READ(sc, WMREG_SYMERRC)
   3121 	    + CSR_READ(sc, WMREG_RXERRC)
   3122 	    + CSR_READ(sc, WMREG_SEC)
   3123 	    + CSR_READ(sc, WMREG_CEXTERR)
   3124 	    + CSR_READ(sc, WMREG_RLEC);
    3125 	/*
    3126 	 * WMREG_RNBC is incremented when there are no available buffers in
    3127 	 * host memory. It is not the number of dropped packets, because the
    3128 	 * ethernet controller can still receive packets in that case as long
    3129 	 * as there is space in the PHY's FIFO.
    3130 	 *
    3131 	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
    3132 	 * if_iqdrops.
    3133 	 */
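	/*
	 * A minimal sketch of such a counter (sc_ev_rnbc is a hypothetical
	 * softc member, not part of this driver):
	 *
	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
	 *	    NULL, device_xname(sc->sc_dev), "rnbc");
	 *	...
	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	 */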
   3134 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3135 
   3136 	if (sc->sc_flags & WM_F_HAS_MII)
   3137 		mii_tick(&sc->sc_mii);
   3138 	else if ((sc->sc_type >= WM_T_82575)
   3139 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3140 		wm_serdes_tick(sc);
   3141 	else
   3142 		wm_tbi_tick(sc);
   3143 
   3144 	WM_CORE_UNLOCK(sc);
   3145 
   3146 	wm_watchdog(ifp);
   3147 
   3148 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3149 }
   3150 
   3151 static int
   3152 wm_ifflags_cb(struct ethercom *ec)
   3153 {
   3154 	struct ifnet *ifp = &ec->ec_if;
   3155 	struct wm_softc *sc = ifp->if_softc;
   3156 	int rc = 0;
   3157 
   3158 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3159 		device_xname(sc->sc_dev), __func__));
   3160 
   3161 	WM_CORE_LOCK(sc);
   3162 
   3163 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3164 	sc->sc_if_flags = ifp->if_flags;
   3165 
   3166 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3167 		rc = ENETRESET;
   3168 		goto out;
   3169 	}
   3170 
   3171 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3172 		wm_set_filter(sc);
   3173 
   3174 	wm_set_vlan(sc);
   3175 
   3176 out:
   3177 	WM_CORE_UNLOCK(sc);
   3178 
   3179 	return rc;
   3180 }
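/*
 * Example (illustrative): toggling IFF_PROMISC only updates the Rx
 * filter in place, while a change outside IFF_CANTCHANGE | IFF_DEBUG
 * makes this callback return ENETRESET so the caller reinitializes the
 * interface.
 */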
   3181 
   3182 /*
   3183  * wm_ioctl:		[ifnet interface function]
   3184  *
   3185  *	Handle control requests from the operator.
   3186  */
   3187 static int
   3188 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3189 {
   3190 	struct wm_softc *sc = ifp->if_softc;
   3191 	struct ifreq *ifr = (struct ifreq *) data;
   3192 	struct ifaddr *ifa = (struct ifaddr *)data;
   3193 	struct sockaddr_dl *sdl;
   3194 	int s, error;
   3195 
   3196 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3197 		device_xname(sc->sc_dev), __func__));
   3198 
   3199 #ifndef WM_MPSAFE
   3200 	s = splnet();
   3201 #endif
   3202 	switch (cmd) {
   3203 	case SIOCSIFMEDIA:
   3204 	case SIOCGIFMEDIA:
   3205 		WM_CORE_LOCK(sc);
   3206 		/* Flow control requires full-duplex mode. */
   3207 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3208 		    (ifr->ifr_media & IFM_FDX) == 0)
   3209 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3210 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3211 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3212 				/* We can do both TXPAUSE and RXPAUSE. */
   3213 				ifr->ifr_media |=
   3214 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3215 			}
   3216 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3217 		}
   3218 		WM_CORE_UNLOCK(sc);
   3219 #ifdef WM_MPSAFE
   3220 		s = splnet();
   3221 #endif
   3222 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3223 #ifdef WM_MPSAFE
   3224 		splx(s);
   3225 #endif
   3226 		break;
   3227 	case SIOCINITIFADDR:
   3228 		WM_CORE_LOCK(sc);
   3229 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3230 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3231 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3232 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3233 			/* unicast address is first multicast entry */
   3234 			wm_set_filter(sc);
   3235 			error = 0;
   3236 			WM_CORE_UNLOCK(sc);
   3237 			break;
   3238 		}
   3239 		WM_CORE_UNLOCK(sc);
   3240 		/*FALLTHROUGH*/
   3241 	default:
   3242 #ifdef WM_MPSAFE
   3243 		s = splnet();
   3244 #endif
   3245 		/* It may call wm_start, so unlock here */
   3246 		error = ether_ioctl(ifp, cmd, data);
   3247 #ifdef WM_MPSAFE
   3248 		splx(s);
   3249 #endif
   3250 		if (error != ENETRESET)
   3251 			break;
   3252 
   3253 		error = 0;
   3254 
   3255 		if (cmd == SIOCSIFCAP) {
   3256 			error = (*ifp->if_init)(ifp);
   3257 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3258 			;
   3259 		else if (ifp->if_flags & IFF_RUNNING) {
   3260 			/*
   3261 			 * Multicast list has changed; set the hardware filter
   3262 			 * accordingly.
   3263 			 */
   3264 			WM_CORE_LOCK(sc);
   3265 			wm_set_filter(sc);
   3266 			WM_CORE_UNLOCK(sc);
   3267 		}
   3268 		break;
   3269 	}
   3270 
   3271 #ifndef WM_MPSAFE
   3272 	splx(s);
   3273 #endif
   3274 	return error;
   3275 }
   3276 
   3277 /* MAC address related */
   3278 
    3279 /*
    3280  * Get the offset of the MAC address and return it.
    3281  * If an error occurred, use offset 0.
    3282  */
   3283 static uint16_t
   3284 wm_check_alt_mac_addr(struct wm_softc *sc)
   3285 {
   3286 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3287 	uint16_t offset = NVM_OFF_MACADDR;
   3288 
   3289 	/* Try to read alternative MAC address pointer */
   3290 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3291 		return 0;
   3292 
    3293 	/* Check whether the pointer is valid. */
   3294 	if ((offset == 0x0000) || (offset == 0xffff))
   3295 		return 0;
   3296 
   3297 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3298 	/*
    3299 	 * Check whether the alternative MAC address is valid. Some cards
    3300 	 * have a non-0xffff pointer but don't actually use an alternative
    3301 	 * MAC address.
    3302 	 *
    3303 	 * To detect this, check whether the multicast bit is set.
    3304 	 */
   3305 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3306 		if (((myea[0] & 0xff) & 0x01) == 0)
   3307 			return offset; /* Found */
   3308 
   3309 	/* Not found */
   3310 	return 0;
   3311 }
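/*
 * Example (illustrative): a first NVM word of 0x1100 gives a first octet
 * of 0x00, whose multicast bit is clear, so the entry is accepted; a
 * first word of 0x11ff (first octet 0xff) would be rejected.
 */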
   3312 
   3313 static int
   3314 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3315 {
   3316 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3317 	uint16_t offset = NVM_OFF_MACADDR;
   3318 	int do_invert = 0;
   3319 
   3320 	switch (sc->sc_type) {
   3321 	case WM_T_82580:
   3322 	case WM_T_I350:
   3323 	case WM_T_I354:
   3324 		/* EEPROM Top Level Partitioning */
   3325 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3326 		break;
   3327 	case WM_T_82571:
   3328 	case WM_T_82575:
   3329 	case WM_T_82576:
   3330 	case WM_T_80003:
   3331 	case WM_T_I210:
   3332 	case WM_T_I211:
   3333 		offset = wm_check_alt_mac_addr(sc);
   3334 		if (offset == 0)
   3335 			if ((sc->sc_funcid & 0x01) == 1)
   3336 				do_invert = 1;
   3337 		break;
   3338 	default:
   3339 		if ((sc->sc_funcid & 0x01) == 1)
   3340 			do_invert = 1;
   3341 		break;
   3342 	}
   3343 
   3344 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3345 		goto bad;
   3346 
   3347 	enaddr[0] = myea[0] & 0xff;
   3348 	enaddr[1] = myea[0] >> 8;
   3349 	enaddr[2] = myea[1] & 0xff;
   3350 	enaddr[3] = myea[1] >> 8;
   3351 	enaddr[4] = myea[2] & 0xff;
   3352 	enaddr[5] = myea[2] >> 8;
   3353 
   3354 	/*
   3355 	 * Toggle the LSB of the MAC address on the second port
   3356 	 * of some dual port cards.
   3357 	 */
   3358 	if (do_invert != 0)
   3359 		enaddr[5] ^= 1;
   3360 
   3361 	return 0;
   3362 
   3363  bad:
   3364 	return -1;
   3365 }
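/*
 * Worked example (illustrative): NVM words { 0x1100, 0x3322, 0x5544 }
 * unpack little-endian to the station address 00:11:22:33:44:55; with
 * do_invert set (second port), the last octet becomes 0x54.
 */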
   3366 
    3367 /*
    3368  * wm_set_ral:
    3369  *
    3370  *	Set an entry in the receive address list.
    3371  */
   3372 static void
   3373 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3374 {
   3375 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3376 	uint32_t wlock_mac;
   3377 	int rv;
   3378 
   3379 	if (enaddr != NULL) {
   3380 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3381 		    (enaddr[3] << 24);
   3382 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3383 		ral_hi |= RAL_AV;
   3384 	} else {
   3385 		ral_lo = 0;
   3386 		ral_hi = 0;
   3387 	}
   3388 
   3389 	switch (sc->sc_type) {
   3390 	case WM_T_82542_2_0:
   3391 	case WM_T_82542_2_1:
   3392 	case WM_T_82543:
   3393 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3394 		CSR_WRITE_FLUSH(sc);
   3395 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3396 		CSR_WRITE_FLUSH(sc);
   3397 		break;
   3398 	case WM_T_PCH2:
   3399 	case WM_T_PCH_LPT:
   3400 	case WM_T_PCH_SPT:
   3401 	case WM_T_PCH_CNP:
   3402 		if (idx == 0) {
   3403 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3404 			CSR_WRITE_FLUSH(sc);
   3405 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3406 			CSR_WRITE_FLUSH(sc);
   3407 			return;
   3408 		}
   3409 		if (sc->sc_type != WM_T_PCH2) {
   3410 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3411 			    FWSM_WLOCK_MAC);
   3412 			addrl = WMREG_SHRAL(idx - 1);
   3413 			addrh = WMREG_SHRAH(idx - 1);
   3414 		} else {
   3415 			wlock_mac = 0;
   3416 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3417 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3418 		}
   3419 
   3420 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3421 			rv = wm_get_swflag_ich8lan(sc);
   3422 			if (rv != 0)
   3423 				return;
   3424 			CSR_WRITE(sc, addrl, ral_lo);
   3425 			CSR_WRITE_FLUSH(sc);
   3426 			CSR_WRITE(sc, addrh, ral_hi);
   3427 			CSR_WRITE_FLUSH(sc);
   3428 			wm_put_swflag_ich8lan(sc);
   3429 		}
   3430 
   3431 		break;
   3432 	default:
   3433 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3434 		CSR_WRITE_FLUSH(sc);
   3435 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3436 		CSR_WRITE_FLUSH(sc);
   3437 		break;
   3438 	}
   3439 }
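/*
 * Packing example (illustrative): for enaddr 00:11:22:33:44:55,
 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV, matching the
 * little-endian layout of the RAL/RAH register pair.
 */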
   3440 
   3441 /*
   3442  * wm_mchash:
   3443  *
   3444  *	Compute the hash of the multicast address for the 4096-bit
   3445  *	multicast filter.
   3446  */
   3447 static uint32_t
   3448 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3449 {
   3450 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3451 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3452 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3453 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3454 	uint32_t hash;
   3455 
   3456 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3457 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3458 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3459 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3460 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3461 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3462 		return (hash & 0x3ff);
   3463 	}
   3464 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3465 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3466 
   3467 	return (hash & 0xfff);
   3468 }
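/*
 * Worked example (illustrative): with sc_mchash_type 0 on a non-ICH/PCH
 * chip, enaddr[4] = 0x00 and enaddr[5] = 0x01 give
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010; wm_set_filter() then uses
 * reg = hash >> 5 = 0 and bit = hash & 0x1f = 0x10 to set a single MTA
 * bit.
 */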
   3469 
   3470 /*
   3471  * wm_set_filter:
   3472  *
   3473  *	Set up the receive filter.
   3474  */
   3475 static void
   3476 wm_set_filter(struct wm_softc *sc)
   3477 {
   3478 	struct ethercom *ec = &sc->sc_ethercom;
   3479 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3480 	struct ether_multi *enm;
   3481 	struct ether_multistep step;
   3482 	bus_addr_t mta_reg;
   3483 	uint32_t hash, reg, bit;
   3484 	int i, size, ralmax;
   3485 
   3486 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3487 		device_xname(sc->sc_dev), __func__));
   3488 
   3489 	if (sc->sc_type >= WM_T_82544)
   3490 		mta_reg = WMREG_CORDOVA_MTA;
   3491 	else
   3492 		mta_reg = WMREG_MTA;
   3493 
   3494 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3495 
   3496 	if (ifp->if_flags & IFF_BROADCAST)
   3497 		sc->sc_rctl |= RCTL_BAM;
   3498 	if (ifp->if_flags & IFF_PROMISC) {
   3499 		sc->sc_rctl |= RCTL_UPE;
   3500 		goto allmulti;
   3501 	}
   3502 
   3503 	/*
   3504 	 * Set the station address in the first RAL slot, and
   3505 	 * clear the remaining slots.
   3506 	 */
   3507 	if (sc->sc_type == WM_T_ICH8)
    3508 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3509 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3510 	    || (sc->sc_type == WM_T_PCH))
   3511 		size = WM_RAL_TABSIZE_ICH8;
   3512 	else if (sc->sc_type == WM_T_PCH2)
   3513 		size = WM_RAL_TABSIZE_PCH2;
   3514 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3515 	    || (sc->sc_type == WM_T_PCH_CNP))
   3516 		size = WM_RAL_TABSIZE_PCH_LPT;
   3517 	else if (sc->sc_type == WM_T_82575)
   3518 		size = WM_RAL_TABSIZE_82575;
   3519 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3520 		size = WM_RAL_TABSIZE_82576;
   3521 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3522 		size = WM_RAL_TABSIZE_I350;
   3523 	else
   3524 		size = WM_RAL_TABSIZE;
   3525 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3526 
   3527 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3528 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3529 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3530 		switch (i) {
   3531 		case 0:
   3532 			/* We can use all entries */
   3533 			ralmax = size;
   3534 			break;
   3535 		case 1:
   3536 			/* Only RAR[0] */
   3537 			ralmax = 1;
   3538 			break;
   3539 		default:
   3540 			/* available SHRA + RAR[0] */
   3541 			ralmax = i + 1;
   3542 		}
   3543 	} else
   3544 		ralmax = size;
   3545 	for (i = 1; i < size; i++) {
   3546 		if (i < ralmax)
   3547 			wm_set_ral(sc, NULL, i);
   3548 	}
   3549 
   3550 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3551 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3552 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3553 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3554 		size = WM_ICH8_MC_TABSIZE;
   3555 	else
   3556 		size = WM_MC_TABSIZE;
   3557 	/* Clear out the multicast table. */
   3558 	for (i = 0; i < size; i++) {
   3559 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3560 		CSR_WRITE_FLUSH(sc);
   3561 	}
   3562 
   3563 	ETHER_LOCK(ec);
   3564 	ETHER_FIRST_MULTI(step, ec, enm);
   3565 	while (enm != NULL) {
   3566 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3567 			ETHER_UNLOCK(ec);
   3568 			/*
   3569 			 * We must listen to a range of multicast addresses.
   3570 			 * For now, just accept all multicasts, rather than
   3571 			 * trying to set only those filter bits needed to match
   3572 			 * the range.  (At this time, the only use of address
   3573 			 * ranges is for IP multicast routing, for which the
   3574 			 * range is big enough to require all bits set.)
   3575 			 */
   3576 			goto allmulti;
   3577 		}
   3578 
   3579 		hash = wm_mchash(sc, enm->enm_addrlo);
   3580 
   3581 		reg = (hash >> 5);
   3582 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3583 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3584 		    || (sc->sc_type == WM_T_PCH2)
   3585 		    || (sc->sc_type == WM_T_PCH_LPT)
   3586 		    || (sc->sc_type == WM_T_PCH_SPT)
   3587 		    || (sc->sc_type == WM_T_PCH_CNP))
   3588 			reg &= 0x1f;
   3589 		else
   3590 			reg &= 0x7f;
   3591 		bit = hash & 0x1f;
   3592 
   3593 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3594 		hash |= 1U << bit;
   3595 
   3596 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
    3597 			/*
    3598 			 * 82544 Errata 9: Certain registers cannot be written
    3599 			 * with particular alignments in PCI-X bus operation
    3600 			 * (FCAH, MTA and VFTA).
    3601 			 */
   3602 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3603 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3604 			CSR_WRITE_FLUSH(sc);
   3605 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3606 			CSR_WRITE_FLUSH(sc);
   3607 		} else {
   3608 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3609 			CSR_WRITE_FLUSH(sc);
   3610 		}
   3611 
   3612 		ETHER_NEXT_MULTI(step, enm);
   3613 	}
   3614 	ETHER_UNLOCK(ec);
   3615 
   3616 	ifp->if_flags &= ~IFF_ALLMULTI;
   3617 	goto setit;
   3618 
   3619  allmulti:
   3620 	ifp->if_flags |= IFF_ALLMULTI;
   3621 	sc->sc_rctl |= RCTL_MPE;
   3622 
   3623  setit:
   3624 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3625 }
   3626 
   3627 /* Reset and init related */
   3628 
   3629 static void
   3630 wm_set_vlan(struct wm_softc *sc)
   3631 {
   3632 
   3633 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3634 		device_xname(sc->sc_dev), __func__));
   3635 
   3636 	/* Deal with VLAN enables. */
   3637 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3638 		sc->sc_ctrl |= CTRL_VME;
   3639 	else
   3640 		sc->sc_ctrl &= ~CTRL_VME;
   3641 
   3642 	/* Write the control registers. */
   3643 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3644 }
   3645 
   3646 static void
   3647 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3648 {
   3649 	uint32_t gcr;
   3650 	pcireg_t ctrl2;
   3651 
   3652 	gcr = CSR_READ(sc, WMREG_GCR);
   3653 
   3654 	/* Only take action if timeout value is defaulted to 0 */
   3655 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3656 		goto out;
   3657 
   3658 	if ((gcr & GCR_CAP_VER2) == 0) {
   3659 		gcr |= GCR_CMPL_TMOUT_10MS;
   3660 		goto out;
   3661 	}
   3662 
   3663 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3664 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3665 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3666 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3667 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3668 
   3669 out:
   3670 	/* Disable completion timeout resend */
   3671 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3672 
   3673 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3674 }
   3675 
   3676 void
   3677 wm_get_auto_rd_done(struct wm_softc *sc)
   3678 {
   3679 	int i;
   3680 
   3681 	/* wait for eeprom to reload */
   3682 	switch (sc->sc_type) {
   3683 	case WM_T_82571:
   3684 	case WM_T_82572:
   3685 	case WM_T_82573:
   3686 	case WM_T_82574:
   3687 	case WM_T_82583:
   3688 	case WM_T_82575:
   3689 	case WM_T_82576:
   3690 	case WM_T_82580:
   3691 	case WM_T_I350:
   3692 	case WM_T_I354:
   3693 	case WM_T_I210:
   3694 	case WM_T_I211:
   3695 	case WM_T_80003:
   3696 	case WM_T_ICH8:
   3697 	case WM_T_ICH9:
   3698 		for (i = 0; i < 10; i++) {
   3699 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3700 				break;
   3701 			delay(1000);
   3702 		}
   3703 		if (i == 10) {
   3704 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3705 			    "complete\n", device_xname(sc->sc_dev));
   3706 		}
   3707 		break;
   3708 	default:
   3709 		break;
   3710 	}
   3711 }
   3712 
   3713 void
   3714 wm_lan_init_done(struct wm_softc *sc)
   3715 {
   3716 	uint32_t reg = 0;
   3717 	int i;
   3718 
   3719 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3720 		device_xname(sc->sc_dev), __func__));
   3721 
   3722 	/* Wait for eeprom to reload */
   3723 	switch (sc->sc_type) {
   3724 	case WM_T_ICH10:
   3725 	case WM_T_PCH:
   3726 	case WM_T_PCH2:
   3727 	case WM_T_PCH_LPT:
   3728 	case WM_T_PCH_SPT:
   3729 	case WM_T_PCH_CNP:
   3730 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3731 			reg = CSR_READ(sc, WMREG_STATUS);
   3732 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3733 				break;
   3734 			delay(100);
   3735 		}
   3736 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3737 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3738 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3739 		}
   3740 		break;
   3741 	default:
   3742 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3743 		    __func__);
   3744 		break;
   3745 	}
   3746 
   3747 	reg &= ~STATUS_LAN_INIT_DONE;
   3748 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3749 }
   3750 
   3751 void
   3752 wm_get_cfg_done(struct wm_softc *sc)
   3753 {
   3754 	int mask;
   3755 	uint32_t reg;
   3756 	int i;
   3757 
   3758 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3759 		device_xname(sc->sc_dev), __func__));
   3760 
   3761 	/* Wait for eeprom to reload */
   3762 	switch (sc->sc_type) {
   3763 	case WM_T_82542_2_0:
   3764 	case WM_T_82542_2_1:
   3765 		/* null */
   3766 		break;
   3767 	case WM_T_82543:
   3768 	case WM_T_82544:
   3769 	case WM_T_82540:
   3770 	case WM_T_82545:
   3771 	case WM_T_82545_3:
   3772 	case WM_T_82546:
   3773 	case WM_T_82546_3:
   3774 	case WM_T_82541:
   3775 	case WM_T_82541_2:
   3776 	case WM_T_82547:
   3777 	case WM_T_82547_2:
   3778 	case WM_T_82573:
   3779 	case WM_T_82574:
   3780 	case WM_T_82583:
   3781 		/* generic */
   3782 		delay(10*1000);
   3783 		break;
   3784 	case WM_T_80003:
   3785 	case WM_T_82571:
   3786 	case WM_T_82572:
   3787 	case WM_T_82575:
   3788 	case WM_T_82576:
   3789 	case WM_T_82580:
   3790 	case WM_T_I350:
   3791 	case WM_T_I354:
   3792 	case WM_T_I210:
   3793 	case WM_T_I211:
   3794 		if (sc->sc_type == WM_T_82571) {
   3795 			/* Only 82571 shares port 0 */
   3796 			mask = EEMNGCTL_CFGDONE_0;
   3797 		} else
   3798 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3799 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3800 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3801 				break;
   3802 			delay(1000);
   3803 		}
   3804 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3805 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3806 				device_xname(sc->sc_dev), __func__));
   3807 		}
   3808 		break;
   3809 	case WM_T_ICH8:
   3810 	case WM_T_ICH9:
   3811 	case WM_T_ICH10:
   3812 	case WM_T_PCH:
   3813 	case WM_T_PCH2:
   3814 	case WM_T_PCH_LPT:
   3815 	case WM_T_PCH_SPT:
   3816 	case WM_T_PCH_CNP:
   3817 		delay(10*1000);
   3818 		if (sc->sc_type >= WM_T_ICH10)
   3819 			wm_lan_init_done(sc);
   3820 		else
   3821 			wm_get_auto_rd_done(sc);
   3822 
   3823 		reg = CSR_READ(sc, WMREG_STATUS);
   3824 		if ((reg & STATUS_PHYRA) != 0)
   3825 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3826 		break;
   3827 	default:
   3828 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3829 		    __func__);
   3830 		break;
   3831 	}
   3832 }
   3833 
   3834 void
   3835 wm_phy_post_reset(struct wm_softc *sc)
   3836 {
   3837 	uint32_t reg;
   3838 
   3839 	/* This function is only for ICH8 and newer. */
   3840 	if (sc->sc_type < WM_T_ICH8)
   3841 		return;
   3842 
   3843 	if (wm_phy_resetisblocked(sc)) {
   3844 		/* XXX */
   3845 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3846 		return;
   3847 	}
   3848 
   3849 	/* Allow time for h/w to get to quiescent state after reset */
   3850 	delay(10*1000);
   3851 
   3852 	/* Perform any necessary post-reset workarounds */
   3853 	if (sc->sc_type == WM_T_PCH)
   3854 		wm_hv_phy_workaround_ich8lan(sc);
   3855 	if (sc->sc_type == WM_T_PCH2)
   3856 		wm_lv_phy_workaround_ich8lan(sc);
   3857 
   3858 	/* Clear the host wakeup bit after lcd reset */
   3859 	if (sc->sc_type >= WM_T_PCH) {
   3860 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3861 		    BM_PORT_GEN_CFG);
   3862 		reg &= ~BM_WUC_HOST_WU_BIT;
   3863 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3864 		    BM_PORT_GEN_CFG, reg);
   3865 	}
   3866 
   3867 	/* Configure the LCD with the extended configuration region in NVM */
   3868 	wm_init_lcd_from_nvm(sc);
   3869 
   3870 	/* Configure the LCD with the OEM bits in NVM */
   3871 }
   3872 
   3873 /* Only for PCH and newer */
   3874 static void
   3875 wm_write_smbus_addr(struct wm_softc *sc)
   3876 {
   3877 	uint32_t strap, freq;
   3878 	uint32_t phy_data;
   3879 
   3880 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3881 		device_xname(sc->sc_dev), __func__));
   3882 
   3883 	strap = CSR_READ(sc, WMREG_STRAP);
   3884 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3885 
   3886 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3887 
   3888 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3889 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3890 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3891 
   3892 	if (sc->sc_phytype == WMPHY_I217) {
   3893 		/* Restore SMBus frequency */
    3894 		if (freq--) {
   3895 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3896 			    | HV_SMB_ADDR_FREQ_HIGH);
   3897 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3898 			    HV_SMB_ADDR_FREQ_LOW);
   3899 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3900 			    HV_SMB_ADDR_FREQ_HIGH);
   3901 		} else {
   3902 			DPRINTF(WM_DEBUG_INIT,
   3903 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3904 				device_xname(sc->sc_dev), __func__));
   3905 		}
   3906 	}
   3907 
   3908 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3909 }
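/*
 * Worked example of the bitfield helpers used above:
 * __SHIFTOUT(0xab00, __BITS(15, 8)) extracts 0xab, and
 * __SHIFTIN(0xab, __BITS(15, 8)) yields 0xab00 again.
 */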
   3910 
   3911 void
   3912 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3913 {
   3914 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3915 	uint16_t phy_page = 0;
   3916 
   3917 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3918 		device_xname(sc->sc_dev), __func__));
   3919 
   3920 	switch (sc->sc_type) {
   3921 	case WM_T_ICH8:
   3922 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3923 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3924 			return;
   3925 
   3926 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3927 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3928 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3929 			break;
   3930 		}
   3931 		/* FALLTHROUGH */
   3932 	case WM_T_PCH:
   3933 	case WM_T_PCH2:
   3934 	case WM_T_PCH_LPT:
   3935 	case WM_T_PCH_SPT:
   3936 	case WM_T_PCH_CNP:
   3937 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3938 		break;
   3939 	default:
   3940 		return;
   3941 	}
   3942 
   3943 	sc->phy.acquire(sc);
   3944 
   3945 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3946 	if ((reg & sw_cfg_mask) == 0)
   3947 		goto release;
   3948 
   3949 	/*
   3950 	 * Make sure HW does not configure LCD from PHY extended configuration
   3951 	 * before SW configuration
   3952 	 */
   3953 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3954 	if ((sc->sc_type < WM_T_PCH2)
   3955 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3956 		goto release;
   3957 
   3958 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3959 		device_xname(sc->sc_dev), __func__));
   3960 	/* word_addr is in DWORD */
   3961 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3962 
   3963 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3964 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3965 	if (cnf_size == 0)
   3966 		goto release;
   3967 
   3968 	if (((sc->sc_type == WM_T_PCH)
   3969 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3970 	    || (sc->sc_type > WM_T_PCH)) {
   3971 		/*
   3972 		 * HW configures the SMBus address and LEDs when the OEM and
   3973 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3974 		 * are cleared, SW will configure them instead.
   3975 		 */
   3976 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3977 			device_xname(sc->sc_dev), __func__));
   3978 		wm_write_smbus_addr(sc);
   3979 
   3980 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3981 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3982 	}
   3983 
   3984 	/* Configure LCD from extended configuration region. */
   3985 	for (i = 0; i < cnf_size; i++) {
   3986 		uint16_t reg_data, reg_addr;
   3987 
   3988 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3989 			goto release;
   3990 
   3991 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3992 			goto release;
   3993 
   3994 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3995 			phy_page = reg_data;
   3996 
   3997 		reg_addr &= IGPHY_MAXREGADDR;
   3998 		reg_addr |= phy_page;
   3999 
   4000 		sc->phy.release(sc); /* XXX */
   4001 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   4002 		sc->phy.acquire(sc); /* XXX */
   4003 	}
   4004 
   4005 release:
   4006 	sc->phy.release(sc);
   4007 	return;
   4008 }
   4009 
   4010 
   4011 /* Init hardware bits */
   4012 void
   4013 wm_initialize_hardware_bits(struct wm_softc *sc)
   4014 {
   4015 	uint32_t tarc0, tarc1, reg;
   4016 
   4017 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4018 		device_xname(sc->sc_dev), __func__));
   4019 
    4020 	/* For 82571 variants, 80003 and ICHs */
   4021 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4022 	    || (sc->sc_type >= WM_T_80003)) {
   4023 
   4024 		/* Transmit Descriptor Control 0 */
   4025 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4026 		reg |= TXDCTL_COUNT_DESC;
   4027 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4028 
   4029 		/* Transmit Descriptor Control 1 */
   4030 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4031 		reg |= TXDCTL_COUNT_DESC;
   4032 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4033 
   4034 		/* TARC0 */
   4035 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4036 		switch (sc->sc_type) {
   4037 		case WM_T_82571:
   4038 		case WM_T_82572:
   4039 		case WM_T_82573:
   4040 		case WM_T_82574:
   4041 		case WM_T_82583:
   4042 		case WM_T_80003:
   4043 			/* Clear bits 30..27 */
   4044 			tarc0 &= ~__BITS(30, 27);
   4045 			break;
   4046 		default:
   4047 			break;
   4048 		}
   4049 
   4050 		switch (sc->sc_type) {
   4051 		case WM_T_82571:
   4052 		case WM_T_82572:
   4053 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4054 
   4055 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4056 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4057 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4058 			/* 8257[12] Errata No.7 */
    4059 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4060 
   4061 			/* TARC1 bit 28 */
   4062 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4063 				tarc1 &= ~__BIT(28);
   4064 			else
   4065 				tarc1 |= __BIT(28);
   4066 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4067 
    4068 			/*
    4069 			 * 8257[12] Errata No.13
    4070 			 * Disable Dynamic Clock Gating.
    4071 			 */
   4072 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4073 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4074 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4075 			break;
   4076 		case WM_T_82573:
   4077 		case WM_T_82574:
   4078 		case WM_T_82583:
   4079 			if ((sc->sc_type == WM_T_82574)
   4080 			    || (sc->sc_type == WM_T_82583))
   4081 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4082 
   4083 			/* Extended Device Control */
   4084 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4085 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4086 			reg |= __BIT(22);	/* Set bit 22 */
   4087 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4088 
   4089 			/* Device Control */
   4090 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4091 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4092 
   4093 			/* PCIe Control Register */
   4094 			/*
   4095 			 * 82573 Errata (unknown).
   4096 			 *
   4097 			 * 82574 Errata 25 and 82583 Errata 12
   4098 			 * "Dropped Rx Packets":
    4099 			 *   NVM Image Version 2.1.4 and newer don't have this bug.
   4100 			 */
   4101 			reg = CSR_READ(sc, WMREG_GCR);
   4102 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4103 			CSR_WRITE(sc, WMREG_GCR, reg);
   4104 
   4105 			if ((sc->sc_type == WM_T_82574)
   4106 			    || (sc->sc_type == WM_T_82583)) {
   4107 				/*
   4108 				 * Document says this bit must be set for
   4109 				 * proper operation.
   4110 				 */
   4111 				reg = CSR_READ(sc, WMREG_GCR);
   4112 				reg |= __BIT(22);
   4113 				CSR_WRITE(sc, WMREG_GCR, reg);
   4114 
    4115 				/*
    4116 				 * Apply a workaround for a hardware erratum
    4117 				 * documented in the errata docs. It fixes an
    4118 				 * issue where some error-prone or unreliable
    4119 				 * PCIe completions occur, particularly with
    4120 				 * ASPM enabled. Without the fix, the issue
    4121 				 * can cause Tx timeouts.
    4122 				 */
   4123 				reg = CSR_READ(sc, WMREG_GCR2);
   4124 				reg |= __BIT(0);
   4125 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4126 			}
   4127 			break;
   4128 		case WM_T_80003:
   4129 			/* TARC0 */
   4130 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4131 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4132 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4133 
   4134 			/* TARC1 bit 28 */
   4135 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4136 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4137 				tarc1 &= ~__BIT(28);
   4138 			else
   4139 				tarc1 |= __BIT(28);
   4140 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4141 			break;
   4142 		case WM_T_ICH8:
   4143 		case WM_T_ICH9:
   4144 		case WM_T_ICH10:
   4145 		case WM_T_PCH:
   4146 		case WM_T_PCH2:
   4147 		case WM_T_PCH_LPT:
   4148 		case WM_T_PCH_SPT:
   4149 		case WM_T_PCH_CNP:
   4150 			/* TARC0 */
   4151 			if (sc->sc_type == WM_T_ICH8) {
   4152 				/* Set TARC0 bits 29 and 28 */
   4153 				tarc0 |= __BITS(29, 28);
   4154 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4155 				tarc0 |= __BIT(29);
    4156 				/*
    4157 				 * Drop bit 28 (from Linux). See the
    4158 				 * I218/I219 spec update, "5. Buffer Overrun
    4159 				 * While the I219 is Processing DMA
    4160 				 * Transactions".
    4161 				 */
   4162 				tarc0 &= ~__BIT(28);
   4163 			}
   4164 			/* Set TARC0 bits 23,24,26,27 */
   4165 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4166 
   4167 			/* CTRL_EXT */
   4168 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4169 			reg |= __BIT(22);	/* Set bit 22 */
   4170 			/*
   4171 			 * Enable PHY low-power state when MAC is at D3
   4172 			 * w/o WoL
   4173 			 */
   4174 			if (sc->sc_type >= WM_T_PCH)
   4175 				reg |= CTRL_EXT_PHYPDEN;
   4176 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4177 
   4178 			/* TARC1 */
   4179 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4180 			/* bit 28 */
   4181 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4182 				tarc1 &= ~__BIT(28);
   4183 			else
   4184 				tarc1 |= __BIT(28);
   4185 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4186 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4187 
   4188 			/* Device Status */
   4189 			if (sc->sc_type == WM_T_ICH8) {
   4190 				reg = CSR_READ(sc, WMREG_STATUS);
   4191 				reg &= ~__BIT(31);
   4192 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4193 
   4194 			}
   4195 
   4196 			/* IOSFPC */
   4197 			if (sc->sc_type == WM_T_PCH_SPT) {
   4198 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4199 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4200 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4201 			}
   4202 			/*
   4203 			 * Work-around descriptor data corruption issue during
   4204 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4205 			 * capability.
   4206 			 */
   4207 			reg = CSR_READ(sc, WMREG_RFCTL);
   4208 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4209 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4210 			break;
   4211 		default:
   4212 			break;
   4213 		}
   4214 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4215 
   4216 		switch (sc->sc_type) {
   4217 		/*
   4218 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4219 		 * Avoid RSS Hash Value bug.
   4220 		 */
   4221 		case WM_T_82571:
   4222 		case WM_T_82572:
   4223 		case WM_T_82573:
   4224 		case WM_T_80003:
   4225 		case WM_T_ICH8:
   4226 			reg = CSR_READ(sc, WMREG_RFCTL);
    4227 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4228 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4229 			break;
   4230 		case WM_T_82574:
    4231 			/* Use extended Rx descriptors. */
   4232 			reg = CSR_READ(sc, WMREG_RFCTL);
   4233 			reg |= WMREG_RFCTL_EXSTEN;
   4234 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4235 			break;
   4236 		default:
   4237 			break;
   4238 		}
   4239 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4240 		/*
   4241 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4242 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4243 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4244 		 * Correctly by the Device"
   4245 		 *
   4246 		 * I354(C2000) Errata AVR53:
   4247 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4248 		 * Hang"
   4249 		 */
   4250 		reg = CSR_READ(sc, WMREG_RFCTL);
   4251 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4252 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4253 	}
   4254 }
   4255 
   4256 static uint32_t
   4257 wm_rxpbs_adjust_82580(uint32_t val)
   4258 {
   4259 	uint32_t rv = 0;
   4260 
   4261 	if (val < __arraycount(wm_82580_rxpbs_table))
   4262 		rv = wm_82580_rxpbs_table[val];
   4263 
   4264 	return rv;
   4265 }
   4266 
   4267 /*
   4268  * wm_reset_phy:
   4269  *
   4270  *	generic PHY reset function.
   4271  *	Same as e1000_phy_hw_reset_generic()
   4272  */
   4273 static void
   4274 wm_reset_phy(struct wm_softc *sc)
   4275 {
   4276 	uint32_t reg;
   4277 
   4278 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4279 		device_xname(sc->sc_dev), __func__));
   4280 	if (wm_phy_resetisblocked(sc))
   4281 		return;
   4282 
   4283 	sc->phy.acquire(sc);
   4284 
   4285 	reg = CSR_READ(sc, WMREG_CTRL);
   4286 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4287 	CSR_WRITE_FLUSH(sc);
   4288 
   4289 	delay(sc->phy.reset_delay_us);
   4290 
   4291 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4292 	CSR_WRITE_FLUSH(sc);
   4293 
   4294 	delay(150);
   4295 
   4296 	sc->phy.release(sc);
   4297 
   4298 	wm_get_cfg_done(sc);
   4299 	wm_phy_post_reset(sc);
   4300 }
   4301 
    4302 /*
    4303  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4304  * so it is enough to check sc->sc_queue[0] only.
    4305  */
   4306 static void
   4307 wm_flush_desc_rings(struct wm_softc *sc)
   4308 {
   4309 	pcireg_t preg;
   4310 	uint32_t reg;
   4311 	struct wm_txqueue *txq;
   4312 	wiseman_txdesc_t *txd;
   4313 	int nexttx;
   4314 	uint32_t rctl;
   4315 
   4316 	/* First, disable MULR fix in FEXTNVM11 */
   4317 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4318 	reg |= FEXTNVM11_DIS_MULRFIX;
   4319 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4320 
   4321 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4322 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4323 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4324 		return;
   4325 
   4326 	/* TX */
   4327 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4328 	    device_xname(sc->sc_dev), preg, reg);
   4329 	reg = CSR_READ(sc, WMREG_TCTL);
   4330 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4331 
   4332 	txq = &sc->sc_queue[0].wmq_txq;
   4333 	nexttx = txq->txq_next;
   4334 	txd = &txq->txq_descs[nexttx];
   4335 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4336 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4337 	txd->wtx_fields.wtxu_status = 0;
   4338 	txd->wtx_fields.wtxu_options = 0;
   4339 	txd->wtx_fields.wtxu_vlan = 0;
   4340 
   4341 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4342 	    BUS_SPACE_BARRIER_WRITE);
   4343 
   4344 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4345 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4346 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4347 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4348 	delay(250);
   4349 
   4350 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4351 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4352 		return;
   4353 
   4354 	/* RX */
   4355 	printf("%s: Need RX flush (reg = %08x)\n",
   4356 	    device_xname(sc->sc_dev), preg);
   4357 	rctl = CSR_READ(sc, WMREG_RCTL);
   4358 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4359 	CSR_WRITE_FLUSH(sc);
   4360 	delay(150);
   4361 
   4362 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4363 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4364 	reg &= 0xffffc000;
   4365 	/*
   4366 	 * update thresholds: prefetch threshold to 31, host threshold
   4367 	 * to 1 and make sure the granularity is "descriptors" and not
   4368 	 * "cache lines"
   4369 	 */
   4370 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
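	/*
	 * Field sketch (assuming the usual RXDCTL layout on this hardware):
	 * PTHRESH occupies bits 5:0 (0x1f = 31) and HTHRESH bits 13:8
	 * (1 << 8 = 1), which is why the lower 14 bits were cleared above.
	 */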
   4371 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4372 
   4373 	/*
   4374 	 * momentarily enable the RX ring for the changes to take
   4375 	 * effect
   4376 	 */
   4377 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4378 	CSR_WRITE_FLUSH(sc);
   4379 	delay(150);
   4380 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4381 }
   4382 
   4383 /*
   4384  * wm_reset:
   4385  *
   4386  *	Reset the i82542 chip.
   4387  */
   4388 static void
   4389 wm_reset(struct wm_softc *sc)
   4390 {
   4391 	int phy_reset = 0;
   4392 	int i, error = 0;
   4393 	uint32_t reg;
   4394 	uint16_t kmreg;
   4395 	int rv;
   4396 
   4397 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4398 		device_xname(sc->sc_dev), __func__));
   4399 	KASSERT(sc->sc_type != 0);
   4400 
   4401 	/*
   4402 	 * Allocate on-chip memory according to the MTU size.
   4403 	 * The Packet Buffer Allocation register must be written
   4404 	 * before the chip is reset.
   4405 	 */
   4406 	switch (sc->sc_type) {
   4407 	case WM_T_82547:
   4408 	case WM_T_82547_2:
   4409 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4410 		    PBA_22K : PBA_30K;
   4411 		for (i = 0; i < sc->sc_nqueues; i++) {
   4412 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4413 			txq->txq_fifo_head = 0;
   4414 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4415 			txq->txq_fifo_size =
   4416 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4417 			txq->txq_fifo_stall = 0;
   4418 		}
   4419 		break;
   4420 	case WM_T_82571:
   4421 	case WM_T_82572:
    4422 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4423 	case WM_T_80003:
   4424 		sc->sc_pba = PBA_32K;
   4425 		break;
   4426 	case WM_T_82573:
   4427 		sc->sc_pba = PBA_12K;
   4428 		break;
   4429 	case WM_T_82574:
   4430 	case WM_T_82583:
   4431 		sc->sc_pba = PBA_20K;
   4432 		break;
   4433 	case WM_T_82576:
   4434 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4435 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4436 		break;
   4437 	case WM_T_82580:
   4438 	case WM_T_I350:
   4439 	case WM_T_I354:
   4440 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4441 		break;
   4442 	case WM_T_I210:
   4443 	case WM_T_I211:
   4444 		sc->sc_pba = PBA_34K;
   4445 		break;
   4446 	case WM_T_ICH8:
   4447 		/* Workaround for a bit corruption issue in FIFO memory */
   4448 		sc->sc_pba = PBA_8K;
   4449 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4450 		break;
   4451 	case WM_T_ICH9:
   4452 	case WM_T_ICH10:
   4453 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4454 		    PBA_14K : PBA_10K;
   4455 		break;
   4456 	case WM_T_PCH:
   4457 	case WM_T_PCH2:	/* XXX 14K? */
   4458 	case WM_T_PCH_LPT:
   4459 	case WM_T_PCH_SPT:
   4460 	case WM_T_PCH_CNP:
   4461 		sc->sc_pba = PBA_26K;
   4462 		break;
   4463 	default:
   4464 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4465 		    PBA_40K : PBA_48K;
   4466 		break;
   4467 	}
   4468 	/*
   4469 	 * Only old or non-multiqueue devices have the PBA register
   4470 	 * XXX Need special handling for 82575.
   4471 	 */
   4472 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4473 	    || (sc->sc_type == WM_T_82575))
   4474 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4475 
   4476 	/* Prevent the PCI-E bus from sticking */
   4477 	if (sc->sc_flags & WM_F_PCIE) {
   4478 		int timeout = 800;
   4479 
   4480 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4481 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4482 
   4483 		while (timeout--) {
   4484 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4485 			    == 0)
   4486 				break;
   4487 			delay(100);
   4488 		}
   4489 		if (timeout == 0)
   4490 			device_printf(sc->sc_dev,
   4491 			    "failed to disable busmastering\n");
   4492 	}
   4493 
   4494 	/* Set the completion timeout for interface */
   4495 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4496 	    || (sc->sc_type == WM_T_82580)
   4497 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4498 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4499 		wm_set_pcie_completion_timeout(sc);
   4500 
   4501 	/* Clear interrupt */
   4502 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4503 	if (wm_is_using_msix(sc)) {
   4504 		if (sc->sc_type != WM_T_82574) {
   4505 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4506 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4507 		} else {
   4508 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4509 		}
   4510 	}
   4511 
   4512 	/* Stop the transmit and receive processes. */
   4513 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4514 	sc->sc_rctl &= ~RCTL_EN;
   4515 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4516 	CSR_WRITE_FLUSH(sc);
   4517 
   4518 	/* XXX set_tbi_sbp_82543() */
   4519 
   4520 	delay(10*1000);
   4521 
   4522 	/* Must acquire the MDIO ownership before MAC reset */
   4523 	switch (sc->sc_type) {
   4524 	case WM_T_82573:
   4525 	case WM_T_82574:
   4526 	case WM_T_82583:
   4527 		error = wm_get_hw_semaphore_82573(sc);
   4528 		break;
   4529 	default:
   4530 		break;
   4531 	}
   4532 
   4533 	/*
   4534 	 * 82541 Errata 29? & 82547 Errata 28?
   4535 	 * See also the description about PHY_RST bit in CTRL register
   4536 	 * in 8254x_GBe_SDM.pdf.
   4537 	 */
   4538 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4539 		CSR_WRITE(sc, WMREG_CTRL,
   4540 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4541 		CSR_WRITE_FLUSH(sc);
   4542 		delay(5000);
   4543 	}
   4544 
   4545 	switch (sc->sc_type) {
   4546 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4547 	case WM_T_82541:
   4548 	case WM_T_82541_2:
   4549 	case WM_T_82547:
   4550 	case WM_T_82547_2:
   4551 		/*
   4552 		 * On some chipsets, a reset through a memory-mapped write
   4553 		 * cycle can cause the chip to reset before completing the
    4554 		 * write cycle.  This causes a major headache that can be
   4555 		 * avoided by issuing the reset via indirect register writes
   4556 		 * through I/O space.
   4557 		 *
   4558 		 * So, if we successfully mapped the I/O BAR at attach time,
   4559 		 * use that.  Otherwise, try our luck with a memory-mapped
   4560 		 * reset.
   4561 		 */
   4562 		if (sc->sc_flags & WM_F_IOH_VALID)
   4563 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4564 		else
   4565 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4566 		break;
   4567 	case WM_T_82545_3:
   4568 	case WM_T_82546_3:
   4569 		/* Use the shadow control register on these chips. */
   4570 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4571 		break;
   4572 	case WM_T_80003:
   4573 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4574 		sc->phy.acquire(sc);
   4575 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4576 		sc->phy.release(sc);
   4577 		break;
   4578 	case WM_T_ICH8:
   4579 	case WM_T_ICH9:
   4580 	case WM_T_ICH10:
   4581 	case WM_T_PCH:
   4582 	case WM_T_PCH2:
   4583 	case WM_T_PCH_LPT:
   4584 	case WM_T_PCH_SPT:
   4585 	case WM_T_PCH_CNP:
   4586 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4587 		if (wm_phy_resetisblocked(sc) == false) {
   4588 			/*
   4589 			 * Gate automatic PHY configuration by hardware on
   4590 			 * non-managed 82579
   4591 			 */
   4592 			if ((sc->sc_type == WM_T_PCH2)
   4593 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4594 				== 0))
   4595 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4596 
   4597 			reg |= CTRL_PHY_RESET;
   4598 			phy_reset = 1;
   4599 		} else
   4600 			printf("XXX reset is blocked!!!\n");
   4601 		sc->phy.acquire(sc);
   4602 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4603 		/* Don't insert a completion barrier during reset */
   4604 		delay(20*1000);
   4605 		mutex_exit(sc->sc_ich_phymtx);
   4606 		break;
   4607 	case WM_T_82580:
   4608 	case WM_T_I350:
   4609 	case WM_T_I354:
   4610 	case WM_T_I210:
   4611 	case WM_T_I211:
   4612 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4613 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4614 			CSR_WRITE_FLUSH(sc);
   4615 		delay(5000);
   4616 		break;
   4617 	case WM_T_82542_2_0:
   4618 	case WM_T_82542_2_1:
   4619 	case WM_T_82543:
   4620 	case WM_T_82540:
   4621 	case WM_T_82545:
   4622 	case WM_T_82546:
   4623 	case WM_T_82571:
   4624 	case WM_T_82572:
   4625 	case WM_T_82573:
   4626 	case WM_T_82574:
   4627 	case WM_T_82575:
   4628 	case WM_T_82576:
   4629 	case WM_T_82583:
   4630 	default:
   4631 		/* Everything else can safely use the documented method. */
   4632 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4633 		break;
   4634 	}
   4635 
   4636 	/* Must release the MDIO ownership after MAC reset */
   4637 	switch (sc->sc_type) {
   4638 	case WM_T_82573:
   4639 	case WM_T_82574:
   4640 	case WM_T_82583:
   4641 		if (error == 0)
   4642 			wm_put_hw_semaphore_82573(sc);
   4643 		break;
   4644 	default:
   4645 		break;
   4646 	}
   4647 
   4648 	if (phy_reset != 0)
   4649 		wm_get_cfg_done(sc);
   4650 
   4651 	/* reload EEPROM */
   4652 	switch (sc->sc_type) {
   4653 	case WM_T_82542_2_0:
   4654 	case WM_T_82542_2_1:
   4655 	case WM_T_82543:
   4656 	case WM_T_82544:
   4657 		delay(10);
   4658 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4659 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4660 		CSR_WRITE_FLUSH(sc);
   4661 		delay(2000);
   4662 		break;
   4663 	case WM_T_82540:
   4664 	case WM_T_82545:
   4665 	case WM_T_82545_3:
   4666 	case WM_T_82546:
   4667 	case WM_T_82546_3:
   4668 		delay(5*1000);
   4669 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4670 		break;
   4671 	case WM_T_82541:
   4672 	case WM_T_82541_2:
   4673 	case WM_T_82547:
   4674 	case WM_T_82547_2:
   4675 		delay(20000);
   4676 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4677 		break;
   4678 	case WM_T_82571:
   4679 	case WM_T_82572:
   4680 	case WM_T_82573:
   4681 	case WM_T_82574:
   4682 	case WM_T_82583:
   4683 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4684 			delay(10);
   4685 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4686 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4687 			CSR_WRITE_FLUSH(sc);
   4688 		}
   4689 		/* check EECD_EE_AUTORD */
   4690 		wm_get_auto_rd_done(sc);
   4691 		/*
    4692 		 * PHY configuration from the NVM starts only after
    4693 		 * EECD_AUTO_RD is set.
   4694 		 */
   4695 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4696 		    || (sc->sc_type == WM_T_82583))
   4697 			delay(25*1000);
   4698 		break;
   4699 	case WM_T_82575:
   4700 	case WM_T_82576:
   4701 	case WM_T_82580:
   4702 	case WM_T_I350:
   4703 	case WM_T_I354:
   4704 	case WM_T_I210:
   4705 	case WM_T_I211:
   4706 	case WM_T_80003:
   4707 		/* check EECD_EE_AUTORD */
   4708 		wm_get_auto_rd_done(sc);
   4709 		break;
   4710 	case WM_T_ICH8:
   4711 	case WM_T_ICH9:
   4712 	case WM_T_ICH10:
   4713 	case WM_T_PCH:
   4714 	case WM_T_PCH2:
   4715 	case WM_T_PCH_LPT:
   4716 	case WM_T_PCH_SPT:
   4717 	case WM_T_PCH_CNP:
   4718 		break;
   4719 	default:
   4720 		panic("%s: unknown type\n", __func__);
   4721 	}
   4722 
   4723 	/* Check whether EEPROM is present or not */
   4724 	switch (sc->sc_type) {
   4725 	case WM_T_82575:
   4726 	case WM_T_82576:
   4727 	case WM_T_82580:
   4728 	case WM_T_I350:
   4729 	case WM_T_I354:
   4730 	case WM_T_ICH8:
   4731 	case WM_T_ICH9:
   4732 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4733 			/* Not found */
   4734 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4735 			if (sc->sc_type == WM_T_82575)
   4736 				wm_reset_init_script_82575(sc);
   4737 		}
   4738 		break;
   4739 	default:
   4740 		break;
   4741 	}
   4742 
   4743 	if (phy_reset != 0)
   4744 		wm_phy_post_reset(sc);
   4745 
   4746 	if ((sc->sc_type == WM_T_82580)
   4747 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4748 		/* clear global device reset status bit */
   4749 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4750 	}
   4751 
   4752 	/* Clear any pending interrupt events. */
   4753 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4754 	reg = CSR_READ(sc, WMREG_ICR);
   4755 	if (wm_is_using_msix(sc)) {
   4756 		if (sc->sc_type != WM_T_82574) {
   4757 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4758 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4759 		} else
   4760 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4761 	}
   4762 
   4763 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4764 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4765 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4766 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4767 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4768 		reg |= KABGTXD_BGSQLBIAS;
   4769 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4770 	}
   4771 
   4772 	/* reload sc_ctrl */
   4773 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4774 
   4775 	if (sc->sc_type == WM_T_I354) {
   4776 #if 0
   4777 		/* I354 uses an external PHY */
   4778 		wm_set_eee_i354(sc);
   4779 #endif
   4780 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4781 		wm_set_eee_i350(sc);
   4782 
   4783 	/*
   4784 	 * For PCH, this write will make sure that any noise will be detected
   4785 	 * as a CRC error and be dropped rather than show up as a bad packet
    4786 	 * to the DMA engine.
   4787 	 */
   4788 	if (sc->sc_type == WM_T_PCH)
   4789 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4790 
   4791 	if (sc->sc_type >= WM_T_82544)
   4792 		CSR_WRITE(sc, WMREG_WUC, 0);
   4793 
   4794 	wm_reset_mdicnfg_82580(sc);
   4795 
   4796 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4797 		wm_pll_workaround_i210(sc);
   4798 
   4799 	if (sc->sc_type == WM_T_80003) {
   4800 		/* default to TRUE to enable the MDIC W/A */
   4801 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4802 
   4803 		rv = wm_kmrn_readreg(sc,
   4804 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4805 		if (rv == 0) {
   4806 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4807 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4808 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4809 			else
   4810 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4811 		}
   4812 	}
   4813 }
   4814 
   4815 /*
   4816  * wm_add_rxbuf:
   4817  *
    4818  *	Add a receive buffer to the indicated descriptor.
   4819  */
   4820 static int
   4821 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4822 {
   4823 	struct wm_softc *sc = rxq->rxq_sc;
   4824 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4825 	struct mbuf *m;
   4826 	int error;
   4827 
   4828 	KASSERT(mutex_owned(rxq->rxq_lock));
   4829 
   4830 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4831 	if (m == NULL)
   4832 		return ENOBUFS;
   4833 
   4834 	MCLGET(m, M_DONTWAIT);
   4835 	if ((m->m_flags & M_EXT) == 0) {
   4836 		m_freem(m);
   4837 		return ENOBUFS;
   4838 	}
   4839 
   4840 	if (rxs->rxs_mbuf != NULL)
   4841 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4842 
   4843 	rxs->rxs_mbuf = m;
   4844 
   4845 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4846 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4847 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4848 	if (error) {
   4849 		/* XXX XXX XXX */
   4850 		aprint_error_dev(sc->sc_dev,
   4851 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4852 		panic("wm_add_rxbuf");
   4853 	}
   4854 
   4855 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4856 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4857 
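	/*
	 * On NEWQUEUE devices, rewrite the RX descriptor only while the
	 * receiver is enabled: on 82575/82576 the descriptors must be
	 * initialized after RCTL.EN is set (see the note above the
	 * wm_set_filter() call in wm_init_locked()).
	 */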
   4858 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4859 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4860 			wm_init_rxdesc(rxq, idx);
   4861 	} else
   4862 		wm_init_rxdesc(rxq, idx);
   4863 
   4864 	return 0;
   4865 }
   4866 
   4867 /*
   4868  * wm_rxdrain:
   4869  *
   4870  *	Drain the receive queue.
   4871  */
   4872 static void
   4873 wm_rxdrain(struct wm_rxqueue *rxq)
   4874 {
   4875 	struct wm_softc *sc = rxq->rxq_sc;
   4876 	struct wm_rxsoft *rxs;
   4877 	int i;
   4878 
   4879 	KASSERT(mutex_owned(rxq->rxq_lock));
   4880 
   4881 	for (i = 0; i < WM_NRXDESC; i++) {
   4882 		rxs = &rxq->rxq_soft[i];
   4883 		if (rxs->rxs_mbuf != NULL) {
   4884 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4885 			m_freem(rxs->rxs_mbuf);
   4886 			rxs->rxs_mbuf = NULL;
   4887 		}
   4888 	}
   4889 }
   4890 
   4891 /*
    4892  * Set up registers for RSS.
    4893  *
    4894  * XXX VMDq is not yet supported.
   4895  */
   4896 static void
   4897 wm_init_rss(struct wm_softc *sc)
   4898 {
   4899 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4900 	int i;
   4901 
   4902 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4903 
   4904 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4905 		int qid, reta_ent;
   4906 
   4907 		qid  = i % sc->sc_nqueues;
    4908 		switch (sc->sc_type) {
   4909 		case WM_T_82574:
   4910 			reta_ent = __SHIFTIN(qid,
   4911 			    RETA_ENT_QINDEX_MASK_82574);
   4912 			break;
   4913 		case WM_T_82575:
   4914 			reta_ent = __SHIFTIN(qid,
   4915 			    RETA_ENT_QINDEX1_MASK_82575);
   4916 			break;
   4917 		default:
   4918 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4919 			break;
   4920 		}
   4921 
   4922 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4923 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4924 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4925 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4926 	}
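
	/*
	 * A worked example of the loop above: with sc_nqueues == 4, entry
	 * i of the RETA points at queue (i % 4), so incoming hash values
	 * are spread round-robin over the queues as 0, 1, 2, 3, 0, 1, ...
	 */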
   4927 
   4928 	rss_getkey((uint8_t *)rss_key);
   4929 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4930 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4931 
   4932 	if (sc->sc_type == WM_T_82574)
   4933 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4934 	else
   4935 		mrqc = MRQC_ENABLE_RSS_MQ;
   4936 
   4937 	/*
   4938 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4939 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4940 	 */
   4941 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4942 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4943 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4944 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4945 
   4946 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4947 }
   4948 
   4949 /*
    4950  * Adjust the TX and RX queue numbers which the system actually uses.
    4951  *
    4952  * The numbers are affected by the following parameters:
    4953  *     - The number of hardware queues
   4954  *     - The number of MSI-X vectors (= "nvectors" argument)
   4955  *     - ncpu
   4956  */
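/*
 * A worked example with illustrative values: on an 82576 (16 hardware
 * queues) with nvectors == 5 and ncpu == 4, the code below computes
 * sc_nqueues = min(nvectors - 1, hw_nqueues, ncpu) = min(4, 16, 4) = 4.
 */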
   4957 static void
   4958 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4959 {
   4960 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4961 
   4962 	if (nvectors < 2) {
   4963 		sc->sc_nqueues = 1;
   4964 		return;
   4965 	}
   4966 
    4967 	switch (sc->sc_type) {
   4968 	case WM_T_82572:
   4969 		hw_ntxqueues = 2;
   4970 		hw_nrxqueues = 2;
   4971 		break;
   4972 	case WM_T_82574:
   4973 		hw_ntxqueues = 2;
   4974 		hw_nrxqueues = 2;
   4975 		break;
   4976 	case WM_T_82575:
   4977 		hw_ntxqueues = 4;
   4978 		hw_nrxqueues = 4;
   4979 		break;
   4980 	case WM_T_82576:
   4981 		hw_ntxqueues = 16;
   4982 		hw_nrxqueues = 16;
   4983 		break;
   4984 	case WM_T_82580:
   4985 	case WM_T_I350:
   4986 	case WM_T_I354:
   4987 		hw_ntxqueues = 8;
   4988 		hw_nrxqueues = 8;
   4989 		break;
   4990 	case WM_T_I210:
   4991 		hw_ntxqueues = 4;
   4992 		hw_nrxqueues = 4;
   4993 		break;
   4994 	case WM_T_I211:
   4995 		hw_ntxqueues = 2;
   4996 		hw_nrxqueues = 2;
   4997 		break;
   4998 		/*
    4999 		 * As the following Ethernet controllers do not support
    5000 		 * MSI-X, this driver does not use multiqueue on them:
   5001 		 *     - WM_T_80003
   5002 		 *     - WM_T_ICH8
   5003 		 *     - WM_T_ICH9
   5004 		 *     - WM_T_ICH10
   5005 		 *     - WM_T_PCH
   5006 		 *     - WM_T_PCH2
   5007 		 *     - WM_T_PCH_LPT
   5008 		 */
   5009 	default:
   5010 		hw_ntxqueues = 1;
   5011 		hw_nrxqueues = 1;
   5012 		break;
   5013 	}
   5014 
   5015 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   5016 
   5017 	/*
    5018 	 * Since more queues than MSI-X vectors cannot improve scaling,
    5019 	 * we limit the number of queues actually used.
   5020 	 */
   5021 	if (nvectors < hw_nqueues + 1)
   5022 		sc->sc_nqueues = nvectors - 1;
   5023 	else
   5024 		sc->sc_nqueues = hw_nqueues;
   5025 
   5026 	/*
    5027 	 * Since more queues than CPUs cannot improve scaling, we limit
    5028 	 * the number of queues actually used.
   5029 	 */
   5030 	if (ncpu < sc->sc_nqueues)
   5031 		sc->sc_nqueues = ncpu;
   5032 }
   5033 
   5034 static inline bool
   5035 wm_is_using_msix(struct wm_softc *sc)
   5036 {
   5037 
   5038 	return (sc->sc_nintrs > 1);
   5039 }
   5040 
   5041 static inline bool
   5042 wm_is_using_multiqueue(struct wm_softc *sc)
   5043 {
   5044 
   5045 	return (sc->sc_nqueues > 1);
   5046 }
   5047 
   5048 static int
   5049 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5050 {
   5051 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5052 	wmq->wmq_id = qidx;
   5053 	wmq->wmq_intr_idx = intr_idx;
   5054 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5055 #ifdef WM_MPSAFE
   5056 	    | SOFTINT_MPSAFE
   5057 #endif
   5058 	    , wm_handle_queue, wmq);
   5059 	if (wmq->wmq_si != NULL)
   5060 		return 0;
   5061 
   5062 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5063 	    wmq->wmq_id);
   5064 
   5065 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5066 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5067 	return ENOMEM;
   5068 }
   5069 
   5070 /*
    5071  * Both single-interrupt MSI and INTx can use this function.
   5072  */
   5073 static int
   5074 wm_setup_legacy(struct wm_softc *sc)
   5075 {
   5076 	pci_chipset_tag_t pc = sc->sc_pc;
   5077 	const char *intrstr = NULL;
   5078 	char intrbuf[PCI_INTRSTR_LEN];
   5079 	int error;
   5080 
   5081 	error = wm_alloc_txrx_queues(sc);
   5082 	if (error) {
   5083 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5084 		    error);
   5085 		return ENOMEM;
   5086 	}
   5087 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5088 	    sizeof(intrbuf));
   5089 #ifdef WM_MPSAFE
   5090 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5091 #endif
   5092 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5093 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5094 	if (sc->sc_ihs[0] == NULL) {
    5095 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   5096 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5097 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5098 		return ENOMEM;
   5099 	}
   5100 
   5101 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5102 	sc->sc_nintrs = 1;
   5103 
   5104 	return wm_softint_establish(sc, 0, 0);
   5105 }
   5106 
   5107 static int
   5108 wm_setup_msix(struct wm_softc *sc)
   5109 {
   5110 	void *vih;
   5111 	kcpuset_t *affinity;
   5112 	int qidx, error, intr_idx, txrx_established;
   5113 	pci_chipset_tag_t pc = sc->sc_pc;
   5114 	const char *intrstr = NULL;
   5115 	char intrbuf[PCI_INTRSTR_LEN];
   5116 	char intr_xname[INTRDEVNAMEBUF];
   5117 
   5118 	if (sc->sc_nqueues < ncpu) {
   5119 		/*
    5120 		 * To avoid conflicts with other devices' interrupts, the
    5121 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   5122 		 */
   5123 		sc->sc_affinity_offset = 1;
   5124 	} else {
   5125 		/*
    5126 		 * In this case, this device uses all CPUs, so we align the
    5127 		 * affinity cpu_index with the MSI-X vector for readability.
   5128 		 */
   5129 		sc->sc_affinity_offset = 0;
   5130 	}
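
	/*
	 * For example (illustrative): with 4 queues on an 8-CPU system,
	 * sc_affinity_offset is 1, so the TXRX0..TXRX3 vectors established
	 * below are bound to CPU#1..CPU#4 while the LINK vector keeps its
	 * default affinity.
	 */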
   5131 
   5132 	error = wm_alloc_txrx_queues(sc);
   5133 	if (error) {
   5134 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5135 		    error);
   5136 		return ENOMEM;
   5137 	}
   5138 
   5139 	kcpuset_create(&affinity, false);
   5140 	intr_idx = 0;
   5141 
   5142 	/*
   5143 	 * TX and RX
   5144 	 */
   5145 	txrx_established = 0;
   5146 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5147 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5148 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5149 
   5150 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5151 		    sizeof(intrbuf));
   5152 #ifdef WM_MPSAFE
   5153 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5154 		    PCI_INTR_MPSAFE, true);
   5155 #endif
   5156 		memset(intr_xname, 0, sizeof(intr_xname));
   5157 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5158 		    device_xname(sc->sc_dev), qidx);
   5159 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5160 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5161 		if (vih == NULL) {
   5162 			aprint_error_dev(sc->sc_dev,
   5163 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5164 			    intrstr ? " at " : "",
   5165 			    intrstr ? intrstr : "");
   5166 
   5167 			goto fail;
   5168 		}
   5169 		kcpuset_zero(affinity);
   5170 		/* Round-robin affinity */
   5171 		kcpuset_set(affinity, affinity_to);
   5172 		error = interrupt_distribute(vih, affinity, NULL);
   5173 		if (error == 0) {
   5174 			aprint_normal_dev(sc->sc_dev,
   5175 			    "for TX and RX interrupting at %s affinity to %u\n",
   5176 			    intrstr, affinity_to);
   5177 		} else {
   5178 			aprint_normal_dev(sc->sc_dev,
   5179 			    "for TX and RX interrupting at %s\n", intrstr);
   5180 		}
   5181 		sc->sc_ihs[intr_idx] = vih;
   5182 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5183 			goto fail;
   5184 		txrx_established++;
   5185 		intr_idx++;
   5186 	}
   5187 
   5188 	/*
   5189 	 * LINK
   5190 	 */
   5191 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5192 	    sizeof(intrbuf));
   5193 #ifdef WM_MPSAFE
   5194 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5195 #endif
   5196 	memset(intr_xname, 0, sizeof(intr_xname));
   5197 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5198 	    device_xname(sc->sc_dev));
   5199 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5200 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5201 	if (vih == NULL) {
   5202 		aprint_error_dev(sc->sc_dev,
   5203 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5204 		    intrstr ? " at " : "",
   5205 		    intrstr ? intrstr : "");
   5206 
   5207 		goto fail;
   5208 	}
    5209 	/* Keep the default affinity for the LINK interrupt */
   5210 	aprint_normal_dev(sc->sc_dev,
   5211 	    "for LINK interrupting at %s\n", intrstr);
   5212 	sc->sc_ihs[intr_idx] = vih;
   5213 	sc->sc_link_intr_idx = intr_idx;
   5214 
   5215 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5216 	kcpuset_destroy(affinity);
   5217 	return 0;
   5218 
   5219  fail:
   5220 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5221 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5222 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5223 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5224 	}
   5225 
   5226 	kcpuset_destroy(affinity);
   5227 	return ENOMEM;
   5228 }
   5229 
   5230 static void
   5231 wm_unset_stopping_flags(struct wm_softc *sc)
   5232 {
   5233 	int i;
   5234 
   5235 	KASSERT(WM_CORE_LOCKED(sc));
   5236 
   5237 	/*
    5238 	 * Must unset the stopping flags in ascending order.
    5239 	 */
    5240 	for (i = 0; i < sc->sc_nqueues; i++) {
   5241 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5242 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5243 
   5244 		mutex_enter(txq->txq_lock);
   5245 		txq->txq_stopping = false;
   5246 		mutex_exit(txq->txq_lock);
   5247 
   5248 		mutex_enter(rxq->rxq_lock);
   5249 		rxq->rxq_stopping = false;
   5250 		mutex_exit(rxq->rxq_lock);
   5251 	}
   5252 
   5253 	sc->sc_core_stopping = false;
   5254 }
   5255 
   5256 static void
   5257 wm_set_stopping_flags(struct wm_softc *sc)
   5258 {
   5259 	int i;
   5260 
   5261 	KASSERT(WM_CORE_LOCKED(sc));
   5262 
   5263 	sc->sc_core_stopping = true;
   5264 
   5265 	/*
    5266 	 * Must set the stopping flags in ascending order.
    5267 	 */
    5268 	for (i = 0; i < sc->sc_nqueues; i++) {
   5269 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5270 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5271 
   5272 		mutex_enter(rxq->rxq_lock);
   5273 		rxq->rxq_stopping = true;
   5274 		mutex_exit(rxq->rxq_lock);
   5275 
   5276 		mutex_enter(txq->txq_lock);
   5277 		txq->txq_stopping = true;
   5278 		mutex_exit(txq->txq_lock);
   5279 	}
   5280 }
   5281 
   5282 /*
    5283  * Write the interrupt interval value to the ITR or EITR register.
   5284  */
   5285 static void
   5286 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5287 {
   5288 
   5289 	if (!wmq->wmq_set_itr)
   5290 		return;
   5291 
   5292 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5293 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5294 
   5295 		/*
    5296 		 * The 82575 doesn't have the CNT_INGR field, so
    5297 		 * overwrite the counter field in software.
   5298 		 */
   5299 		if (sc->sc_type == WM_T_82575)
   5300 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5301 		else
   5302 			eitr |= EITR_CNT_INGR;
   5303 
   5304 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5305 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5306 		/*
    5307 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5308 		 * the multiqueue function with MSI-X.
   5309 		 */
   5310 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5311 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5312 	} else {
   5313 		KASSERT(wmq->wmq_id == 0);
   5314 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5315 	}
   5316 
   5317 	wmq->wmq_set_itr = false;
   5318 }
   5319 
   5320 /*
   5321  * TODO
    5322  * The dynamic ITR calculation below is almost the same as Linux's igb,
    5323  * but it does not fit wm(4) well, so AIM stays disabled until we find
    5324  * an appropriate ITR calculation.
   5325  */
   5326 /*
    5327  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5328  * write. This function does not write the ITR/EITR register itself.
   5329  */
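/*
 * A sketch of the disabled path below, with illustrative numbers: for an
 * average frame of 576 bytes, avg_size = 576 + 24 = 600, which falls in
 * the mid-size range, so new_itr = 600 / 3 = 200; on anything other than
 * the 82575 this is then scaled by 4 to 800.
 */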
   5330 static void
   5331 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5332 {
   5333 #ifdef NOTYET
   5334 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5335 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5336 	uint32_t avg_size = 0;
   5337 	uint32_t new_itr;
   5338 
   5339 	if (rxq->rxq_packets)
   5340 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5341 	if (txq->txq_packets)
   5342 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5343 
   5344 	if (avg_size == 0) {
   5345 		new_itr = 450; /* restore default value */
   5346 		goto out;
   5347 	}
   5348 
   5349 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5350 	avg_size += 24;
   5351 
   5352 	/* Don't starve jumbo frames */
   5353 	avg_size = min(avg_size, 3000);
   5354 
   5355 	/* Give a little boost to mid-size frames */
   5356 	if ((avg_size > 300) && (avg_size < 1200))
   5357 		new_itr = avg_size / 3;
   5358 	else
   5359 		new_itr = avg_size / 2;
   5360 
   5361 out:
   5362 	/*
    5363 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   5364 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5365 	 */
   5366 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5367 		new_itr *= 4;
   5368 
   5369 	if (new_itr != wmq->wmq_itr) {
   5370 		wmq->wmq_itr = new_itr;
   5371 		wmq->wmq_set_itr = true;
   5372 	} else
   5373 		wmq->wmq_set_itr = false;
   5374 
   5375 	rxq->rxq_packets = 0;
   5376 	rxq->rxq_bytes = 0;
   5377 	txq->txq_packets = 0;
   5378 	txq->txq_bytes = 0;
   5379 #endif
   5380 }
   5381 
   5382 /*
   5383  * wm_init:		[ifnet interface function]
   5384  *
   5385  *	Initialize the interface.
   5386  */
   5387 static int
   5388 wm_init(struct ifnet *ifp)
   5389 {
   5390 	struct wm_softc *sc = ifp->if_softc;
   5391 	int ret;
   5392 
   5393 	WM_CORE_LOCK(sc);
   5394 	ret = wm_init_locked(ifp);
   5395 	WM_CORE_UNLOCK(sc);
   5396 
   5397 	return ret;
   5398 }
   5399 
   5400 static int
   5401 wm_init_locked(struct ifnet *ifp)
   5402 {
   5403 	struct wm_softc *sc = ifp->if_softc;
   5404 	int i, j, trynum, error = 0;
   5405 	uint32_t reg;
   5406 
   5407 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5408 		device_xname(sc->sc_dev), __func__));
   5409 	KASSERT(WM_CORE_LOCKED(sc));
   5410 
   5411 	/*
    5412 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5413 	 * There is a small but measurable benefit to avoiding the adjustment
    5414 	 * of the descriptor so that the headers are aligned, for normal MTU,
   5415 	 * on such platforms.  One possibility is that the DMA itself is
   5416 	 * slightly more efficient if the front of the entire packet (instead
   5417 	 * of the front of the headers) is aligned.
   5418 	 *
   5419 	 * Note we must always set align_tweak to 0 if we are using
   5420 	 * jumbo frames.
   5421 	 */
   5422 #ifdef __NO_STRICT_ALIGNMENT
   5423 	sc->sc_align_tweak = 0;
   5424 #else
   5425 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5426 		sc->sc_align_tweak = 0;
   5427 	else
   5428 		sc->sc_align_tweak = 2;
   5429 #endif /* __NO_STRICT_ALIGNMENT */
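
	/*
	 * With sc_align_tweak == 2, the 14-byte Ethernet header ends on a
	 * 4-byte boundary, so the IP header that follows is 4-byte aligned
	 * (the usual receive-buffer alignment trick).
	 */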
   5430 
   5431 	/* Cancel any pending I/O. */
   5432 	wm_stop_locked(ifp, 0);
   5433 
   5434 	/* update statistics before reset */
   5435 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5436 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5437 
   5438 	/* PCH_SPT hardware workaround */
   5439 	if (sc->sc_type == WM_T_PCH_SPT)
   5440 		wm_flush_desc_rings(sc);
   5441 
   5442 	/* Reset the chip to a known state. */
   5443 	wm_reset(sc);
   5444 
   5445 	/*
   5446 	 * AMT based hardware can now take control from firmware
   5447 	 * Do this after reset.
   5448 	 */
   5449 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5450 		wm_get_hw_control(sc);
   5451 
   5452 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5453 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5454 		wm_legacy_irq_quirk_spt(sc);
   5455 
   5456 	/* Init hardware bits */
   5457 	wm_initialize_hardware_bits(sc);
   5458 
   5459 	/* Reset the PHY. */
   5460 	if (sc->sc_flags & WM_F_HAS_MII)
   5461 		wm_gmii_reset(sc);
   5462 
   5463 	/* Calculate (E)ITR value */
   5464 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5465 		/*
    5466 		 * For NEWQUEUE's EITR (except for the 82575).
    5467 		 * The 82575's EITR should be set to the same throttling value
    5468 		 * as the other old controllers' ITR because the interrupt/sec
    5469 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5470 		 *
    5471 		 * The 82574's EITR should be set to the same value as its ITR.
    5472 		 *
    5473 		 * For N interrupts/sec, set this value to:
    5474 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5475 		 */
   5476 		sc->sc_itr_init = 450;
   5477 	} else if (sc->sc_type >= WM_T_82543) {
   5478 		/*
   5479 		 * Set up the interrupt throttling register (units of 256ns)
   5480 		 * Note that a footnote in Intel's documentation says this
   5481 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5482 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5483 		 * that that is also true for the 1024ns units of the other
   5484 		 * interrupt-related timer registers -- so, really, we ought
   5485 		 * to divide this value by 4 when the link speed is low.
   5486 		 *
   5487 		 * XXX implement this division at link speed change!
   5488 		 */
   5489 
   5490 		/*
   5491 		 * For N interrupts/sec, set this value to:
   5492 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5493 		 * absolute and packet timer values to this value
   5494 		 * divided by 4 to get "simple timer" behavior.
   5495 		 */
   5496 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5497 	}
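
	/*
	 * Worked numbers for the two defaults above: an EITR value of 450
	 * gives roughly 1,000,000 / 450 ~= 2222 interrupts/sec, while an
	 * ITR value of 1500 gives 1,000,000,000 / (1500 * 256) ~= 2604
	 * interrupts/sec.
	 */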
   5498 
   5499 	error = wm_init_txrx_queues(sc);
   5500 	if (error)
   5501 		goto out;
   5502 
   5503 	/*
   5504 	 * Clear out the VLAN table -- we don't use it (yet).
   5505 	 */
   5506 	CSR_WRITE(sc, WMREG_VET, 0);
   5507 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5508 		trynum = 10; /* Due to hw errata */
   5509 	else
   5510 		trynum = 1;
   5511 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5512 		for (j = 0; j < trynum; j++)
   5513 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5514 
   5515 	/*
   5516 	 * Set up flow-control parameters.
   5517 	 *
   5518 	 * XXX Values could probably stand some tuning.
   5519 	 */
   5520 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5521 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5522 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5523 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5524 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5525 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5526 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5527 	}
   5528 
   5529 	sc->sc_fcrtl = FCRTL_DFLT;
   5530 	if (sc->sc_type < WM_T_82543) {
   5531 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5532 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5533 	} else {
   5534 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5535 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5536 	}
   5537 
   5538 	if (sc->sc_type == WM_T_80003)
   5539 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5540 	else
   5541 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5542 
   5543 	/* Writes the control register. */
   5544 	wm_set_vlan(sc);
   5545 
   5546 	if (sc->sc_flags & WM_F_HAS_MII) {
   5547 		uint16_t kmreg;
   5548 
   5549 		switch (sc->sc_type) {
   5550 		case WM_T_80003:
   5551 		case WM_T_ICH8:
   5552 		case WM_T_ICH9:
   5553 		case WM_T_ICH10:
   5554 		case WM_T_PCH:
   5555 		case WM_T_PCH2:
   5556 		case WM_T_PCH_LPT:
   5557 		case WM_T_PCH_SPT:
   5558 		case WM_T_PCH_CNP:
   5559 			/*
   5560 			 * Set the mac to wait the maximum time between each
   5561 			 * iteration and increase the max iterations when
   5562 			 * polling the phy; this fixes erroneous timeouts at
   5563 			 * 10Mbps.
   5564 			 */
   5565 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5566 			    0xFFFF);
   5567 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5568 			    &kmreg);
   5569 			kmreg |= 0x3F;
   5570 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5571 			    kmreg);
   5572 			break;
   5573 		default:
   5574 			break;
   5575 		}
   5576 
   5577 		if (sc->sc_type == WM_T_80003) {
   5578 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5579 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5580 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5581 
    5582 			/* Bypass the RX and TX FIFOs */
   5583 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5584 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5585 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5586 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5587 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5588 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5589 		}
   5590 	}
   5591 #if 0
   5592 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5593 #endif
   5594 
   5595 	/* Set up checksum offload parameters. */
   5596 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5597 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5598 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5599 		reg |= RXCSUM_IPOFL;
   5600 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5601 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5602 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5603 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5604 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5605 
   5606 	/* Set registers about MSI-X */
   5607 	if (wm_is_using_msix(sc)) {
   5608 		uint32_t ivar;
   5609 		struct wm_queue *wmq;
   5610 		int qid, qintr_idx;
   5611 
   5612 		if (sc->sc_type == WM_T_82575) {
   5613 			/* Interrupt control */
   5614 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5615 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5616 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5617 
   5618 			/* TX and RX */
   5619 			for (i = 0; i < sc->sc_nqueues; i++) {
   5620 				wmq = &sc->sc_queue[i];
   5621 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5622 				    EITR_TX_QUEUE(wmq->wmq_id)
   5623 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5624 			}
   5625 			/* Link status */
   5626 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5627 			    EITR_OTHER);
   5628 		} else if (sc->sc_type == WM_T_82574) {
   5629 			/* Interrupt control */
   5630 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5631 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5632 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5633 
   5634 			/*
    5635 			 * Work around an issue with spurious interrupts
    5636 			 * in MSI-X mode.
    5637 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5638 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5639 			 */
   5640 			reg = CSR_READ(sc, WMREG_RFCTL);
   5641 			reg |= WMREG_RFCTL_ACKDIS;
   5642 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5643 
   5644 			ivar = 0;
   5645 			/* TX and RX */
   5646 			for (i = 0; i < sc->sc_nqueues; i++) {
   5647 				wmq = &sc->sc_queue[i];
   5648 				qid = wmq->wmq_id;
   5649 				qintr_idx = wmq->wmq_intr_idx;
   5650 
   5651 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5652 				    IVAR_TX_MASK_Q_82574(qid));
   5653 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5654 				    IVAR_RX_MASK_Q_82574(qid));
   5655 			}
   5656 			/* Link status */
   5657 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5658 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5659 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5660 		} else {
   5661 			/* Interrupt control */
   5662 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5663 			    | GPIE_EIAME | GPIE_PBA);
   5664 
   5665 			switch (sc->sc_type) {
   5666 			case WM_T_82580:
   5667 			case WM_T_I350:
   5668 			case WM_T_I354:
   5669 			case WM_T_I210:
   5670 			case WM_T_I211:
   5671 				/* TX and RX */
   5672 				for (i = 0; i < sc->sc_nqueues; i++) {
   5673 					wmq = &sc->sc_queue[i];
   5674 					qid = wmq->wmq_id;
   5675 					qintr_idx = wmq->wmq_intr_idx;
   5676 
   5677 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5678 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5679 					ivar |= __SHIFTIN((qintr_idx
   5680 						| IVAR_VALID),
   5681 					    IVAR_TX_MASK_Q(qid));
   5682 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5683 					ivar |= __SHIFTIN((qintr_idx
   5684 						| IVAR_VALID),
   5685 					    IVAR_RX_MASK_Q(qid));
   5686 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5687 				}
   5688 				break;
   5689 			case WM_T_82576:
   5690 				/* TX and RX */
   5691 				for (i = 0; i < sc->sc_nqueues; i++) {
   5692 					wmq = &sc->sc_queue[i];
   5693 					qid = wmq->wmq_id;
   5694 					qintr_idx = wmq->wmq_intr_idx;
   5695 
   5696 					ivar = CSR_READ(sc,
   5697 					    WMREG_IVAR_Q_82576(qid));
   5698 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5699 					ivar |= __SHIFTIN((qintr_idx
   5700 						| IVAR_VALID),
   5701 					    IVAR_TX_MASK_Q_82576(qid));
   5702 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5703 					ivar |= __SHIFTIN((qintr_idx
   5704 						| IVAR_VALID),
   5705 					    IVAR_RX_MASK_Q_82576(qid));
   5706 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5707 					    ivar);
   5708 				}
   5709 				break;
   5710 			default:
   5711 				break;
   5712 			}
   5713 
   5714 			/* Link status */
   5715 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5716 			    IVAR_MISC_OTHER);
   5717 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5718 		}
   5719 
   5720 		if (wm_is_using_multiqueue(sc)) {
   5721 			wm_init_rss(sc);
   5722 
   5723 			/*
    5724 			 * NOTE: Receive Full-Packet Checksum Offload is
    5725 			 * mutually exclusive with Multiqueue; however, this
    5726 			 * is not the same as the TCP/IP checksums, which
    5727 			 * still work.
    5728 			 */
   5729 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5730 			reg |= RXCSUM_PCSD;
   5731 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5732 		}
   5733 	}
   5734 
   5735 	/* Set up the interrupt registers. */
   5736 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5737 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5738 	    ICR_RXO | ICR_RXT0;
   5739 	if (wm_is_using_msix(sc)) {
   5740 		uint32_t mask;
   5741 		struct wm_queue *wmq;
   5742 
   5743 		switch (sc->sc_type) {
   5744 		case WM_T_82574:
   5745 			mask = 0;
   5746 			for (i = 0; i < sc->sc_nqueues; i++) {
   5747 				wmq = &sc->sc_queue[i];
   5748 				mask |= ICR_TXQ(wmq->wmq_id);
   5749 				mask |= ICR_RXQ(wmq->wmq_id);
   5750 			}
   5751 			mask |= ICR_OTHER;
   5752 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5753 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5754 			break;
   5755 		default:
   5756 			if (sc->sc_type == WM_T_82575) {
   5757 				mask = 0;
   5758 				for (i = 0; i < sc->sc_nqueues; i++) {
   5759 					wmq = &sc->sc_queue[i];
   5760 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5761 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5762 				}
   5763 				mask |= EITR_OTHER;
   5764 			} else {
   5765 				mask = 0;
   5766 				for (i = 0; i < sc->sc_nqueues; i++) {
   5767 					wmq = &sc->sc_queue[i];
   5768 					mask |= 1 << wmq->wmq_intr_idx;
   5769 				}
   5770 				mask |= 1 << sc->sc_link_intr_idx;
   5771 			}
   5772 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5773 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5774 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5775 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5776 			break;
   5777 		}
   5778 	} else
   5779 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5780 
   5781 	/* Set up the inter-packet gap. */
   5782 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5783 
   5784 	if (sc->sc_type >= WM_T_82543) {
   5785 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5786 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5787 			wm_itrs_writereg(sc, wmq);
   5788 		}
   5789 		/*
    5790 		 * Link interrupts occur much less frequently than TX
    5791 		 * and RX interrupts, so we don't tune the
    5792 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    5793 		 * if_igb does.
   5794 		 */
   5795 	}
   5796 
   5797 	/* Set the VLAN ethernetype. */
   5798 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5799 
   5800 	/*
   5801 	 * Set up the transmit control register; we start out with
    5802 	 * a collision distance suitable for FDX, but update it when
   5803 	 * we resolve the media type.
   5804 	 */
   5805 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5806 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5807 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5808 	if (sc->sc_type >= WM_T_82571)
   5809 		sc->sc_tctl |= TCTL_MULR;
   5810 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5811 
   5812 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5813 		/* Write TDT after TCTL.EN is set. See the document. */
   5814 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5815 	}
   5816 
   5817 	if (sc->sc_type == WM_T_80003) {
   5818 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5819 		reg &= ~TCTL_EXT_GCEX_MASK;
   5820 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5821 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5822 	}
   5823 
   5824 	/* Set the media. */
   5825 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5826 		goto out;
   5827 
   5828 	/* Configure for OS presence */
   5829 	wm_init_manageability(sc);
   5830 
   5831 	/*
   5832 	 * Set up the receive control register; we actually program
   5833 	 * the register when we set the receive filter.  Use multicast
   5834 	 * address offset type 0.
   5835 	 *
   5836 	 * Only the i82544 has the ability to strip the incoming
   5837 	 * CRC, so we don't enable that feature.
   5838 	 */
   5839 	sc->sc_mchash_type = 0;
   5840 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5841 	    | RCTL_MO(sc->sc_mchash_type);
   5842 
   5843 	/*
    5844 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5845 	 */
   5846 	if (sc->sc_type == WM_T_82574)
   5847 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5848 
   5849 	/*
   5850 	 * The I350 has a bug where it always strips the CRC whether
    5851 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   5852 	 */
   5853 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5854 	    || (sc->sc_type == WM_T_I210))
   5855 		sc->sc_rctl |= RCTL_SECRC;
   5856 
   5857 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5858 	    && (ifp->if_mtu > ETHERMTU)) {
   5859 		sc->sc_rctl |= RCTL_LPE;
   5860 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5861 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5862 	}
   5863 
   5864 	if (MCLBYTES == 2048) {
   5865 		sc->sc_rctl |= RCTL_2k;
   5866 	} else {
   5867 		if (sc->sc_type >= WM_T_82543) {
   5868 			switch (MCLBYTES) {
   5869 			case 4096:
   5870 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5871 				break;
   5872 			case 8192:
   5873 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5874 				break;
   5875 			case 16384:
   5876 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5877 				break;
   5878 			default:
   5879 				panic("wm_init: MCLBYTES %d unsupported",
   5880 				    MCLBYTES);
   5881 				break;
   5882 			}
   5883 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5884 	}
   5885 
   5886 	/* Enable ECC */
   5887 	switch (sc->sc_type) {
   5888 	case WM_T_82571:
   5889 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5890 		reg |= PBA_ECC_CORR_EN;
   5891 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5892 		break;
   5893 	case WM_T_PCH_LPT:
   5894 	case WM_T_PCH_SPT:
   5895 	case WM_T_PCH_CNP:
   5896 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5897 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5898 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5899 
   5900 		sc->sc_ctrl |= CTRL_MEHE;
   5901 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5902 		break;
   5903 	default:
   5904 		break;
   5905 	}
   5906 
   5907 	/*
   5908 	 * Set the receive filter.
   5909 	 *
   5910 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5911 	 * the setting of RCTL.EN in wm_set_filter()
   5912 	 */
   5913 	wm_set_filter(sc);
   5914 
    5915 	/* On the 82575 and later, set RDT only if RX is enabled */
   5916 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5917 		int qidx;
   5918 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5919 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5920 			for (i = 0; i < WM_NRXDESC; i++) {
   5921 				mutex_enter(rxq->rxq_lock);
   5922 				wm_init_rxdesc(rxq, i);
   5923 				mutex_exit(rxq->rxq_lock);
   5925 			}
   5926 		}
   5927 	}
   5928 
   5929 	wm_unset_stopping_flags(sc);
   5930 
   5931 	/* Start the one second link check clock. */
   5932 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5933 
   5934 	/* ...all done! */
   5935 	ifp->if_flags |= IFF_RUNNING;
   5936 	ifp->if_flags &= ~IFF_OACTIVE;
   5937 
   5938  out:
   5939 	sc->sc_if_flags = ifp->if_flags;
   5940 	if (error)
   5941 		log(LOG_ERR, "%s: interface not running\n",
   5942 		    device_xname(sc->sc_dev));
   5943 	return error;
   5944 }
   5945 
   5946 /*
   5947  * wm_stop:		[ifnet interface function]
   5948  *
   5949  *	Stop transmission on the interface.
   5950  */
   5951 static void
   5952 wm_stop(struct ifnet *ifp, int disable)
   5953 {
   5954 	struct wm_softc *sc = ifp->if_softc;
   5955 
   5956 	WM_CORE_LOCK(sc);
   5957 	wm_stop_locked(ifp, disable);
   5958 	WM_CORE_UNLOCK(sc);
   5959 }
   5960 
   5961 static void
   5962 wm_stop_locked(struct ifnet *ifp, int disable)
   5963 {
   5964 	struct wm_softc *sc = ifp->if_softc;
   5965 	struct wm_txsoft *txs;
   5966 	int i, qidx;
   5967 
   5968 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5969 		device_xname(sc->sc_dev), __func__));
   5970 	KASSERT(WM_CORE_LOCKED(sc));
   5971 
   5972 	wm_set_stopping_flags(sc);
   5973 
   5974 	/* Stop the one second clock. */
   5975 	callout_stop(&sc->sc_tick_ch);
   5976 
   5977 	/* Stop the 82547 Tx FIFO stall check timer. */
   5978 	if (sc->sc_type == WM_T_82547)
   5979 		callout_stop(&sc->sc_txfifo_ch);
   5980 
   5981 	if (sc->sc_flags & WM_F_HAS_MII) {
   5982 		/* Down the MII. */
   5983 		mii_down(&sc->sc_mii);
   5984 	} else {
   5985 #if 0
   5986 		/* Should we clear PHY's status properly? */
   5987 		wm_reset(sc);
   5988 #endif
   5989 	}
   5990 
   5991 	/* Stop the transmit and receive processes. */
   5992 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5993 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5994 	sc->sc_rctl &= ~RCTL_EN;
   5995 
   5996 	/*
   5997 	 * Clear the interrupt mask to ensure the device cannot assert its
   5998 	 * interrupt line.
   5999 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6000 	 * service any currently pending or shared interrupt.
   6001 	 */
   6002 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6003 	sc->sc_icr = 0;
   6004 	if (wm_is_using_msix(sc)) {
   6005 		if (sc->sc_type != WM_T_82574) {
   6006 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6007 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6008 		} else
   6009 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6010 	}
   6011 
   6012 	/* Release any queued transmit buffers. */
   6013 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6014 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6015 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6016 		mutex_enter(txq->txq_lock);
   6017 		txq->txq_sending = false; /* ensure watchdog disabled */
   6018 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6019 			txs = &txq->txq_soft[i];
   6020 			if (txs->txs_mbuf != NULL) {
   6021 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6022 				m_freem(txs->txs_mbuf);
   6023 				txs->txs_mbuf = NULL;
   6024 			}
   6025 		}
   6026 		mutex_exit(txq->txq_lock);
   6027 	}
   6028 
   6029 	/* Mark the interface as down and cancel the watchdog timer. */
   6030 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6031 
   6032 	if (disable) {
   6033 		for (i = 0; i < sc->sc_nqueues; i++) {
   6034 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6035 			mutex_enter(rxq->rxq_lock);
   6036 			wm_rxdrain(rxq);
   6037 			mutex_exit(rxq->rxq_lock);
   6038 		}
   6039 	}
   6040 
   6041 #if 0 /* notyet */
   6042 	if (sc->sc_type >= WM_T_82544)
   6043 		CSR_WRITE(sc, WMREG_WUC, 0);
   6044 #endif
   6045 }
   6046 
   6047 static void
   6048 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6049 {
   6050 	struct mbuf *m;
   6051 	int i;
   6052 
   6053 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6054 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6055 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6056 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6057 		    m->m_data, m->m_len, m->m_flags);
   6058 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6059 	    i, i == 1 ? "" : "s");
   6060 }
   6061 
   6062 /*
   6063  * wm_82547_txfifo_stall:
   6064  *
   6065  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6066  *	reset the FIFO pointers, and restart packet transmission.
   6067  */
   6068 static void
   6069 wm_82547_txfifo_stall(void *arg)
   6070 {
   6071 	struct wm_softc *sc = arg;
   6072 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6073 
   6074 	mutex_enter(txq->txq_lock);
   6075 
   6076 	if (txq->txq_stopping)
   6077 		goto out;
   6078 
   6079 	if (txq->txq_fifo_stall) {
   6080 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6081 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6082 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6083 			/*
   6084 			 * Packets have drained.  Stop transmitter, reset
   6085 			 * FIFO pointers, restart transmitter, and kick
   6086 			 * the packet queue.
   6087 			 */
   6088 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6089 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6090 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6091 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6092 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6093 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6094 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6095 			CSR_WRITE_FLUSH(sc);
   6096 
   6097 			txq->txq_fifo_head = 0;
   6098 			txq->txq_fifo_stall = 0;
   6099 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6100 		} else {
   6101 			/*
   6102 			 * Still waiting for packets to drain; try again in
   6103 			 * another tick.
   6104 			 */
   6105 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6106 		}
   6107 	}
   6108 
   6109 out:
   6110 	mutex_exit(txq->txq_lock);
   6111 }
   6112 
   6113 /*
   6114  * wm_82547_txfifo_bugchk:
   6115  *
   6116  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6117  *	prevent enqueueing a packet that would wrap around the end
   6118  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
    6119  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6120  *	We do this by checking the amount of space before the end
   6121  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6122  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6123  *	the internal FIFO pointers to the beginning, and restart
   6124  *	transmission on the interface.
   6125  */
   6126 #define	WM_FIFO_HDR		0x10
   6127 #define	WM_82547_PAD_LEN	0x3e0
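
/*
 * A worked example of the check below: a full-sized 1514-byte frame
 * occupies len = roundup(1514 + 0x10, 0x10) = 0x600 (1536) bytes of
 * FIFO, and is sent only while len < WM_82547_PAD_LEN + space, i.e.
 * while more than 0x600 - 0x3e0 = 0x220 bytes remain before the end of
 * the FIFO; otherwise we stall until the FIFO drains.
 */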
   6128 static int
   6129 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6130 {
   6131 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6132 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6133 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6134 
   6135 	/* Just return if already stalled. */
   6136 	if (txq->txq_fifo_stall)
   6137 		return 1;
   6138 
   6139 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6140 		/* Stall only occurs in half-duplex mode. */
   6141 		goto send_packet;
   6142 	}
   6143 
   6144 	if (len >= WM_82547_PAD_LEN + space) {
   6145 		txq->txq_fifo_stall = 1;
   6146 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6147 		return 1;
   6148 	}
   6149 
   6150  send_packet:
   6151 	txq->txq_fifo_head += len;
   6152 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6153 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6154 
   6155 	return 0;
   6156 }
   6157 
   6158 static int
   6159 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6160 {
   6161 	int error;
   6162 
   6163 	/*
   6164 	 * Allocate the control data structures, and create and load the
   6165 	 * DMA map for it.
   6166 	 *
   6167 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6168 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6169 	 * both sets within the same 4G segment.
   6170 	 */
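	/*
	 * The 4G constraint is enforced below by passing 0x100000000ULL as
	 * the boundary argument of bus_dmamem_alloc(), which guarantees
	 * that the allocation does not cross a 4GB boundary.
	 */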
   6171 	if (sc->sc_type < WM_T_82544)
   6172 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6173 	else
   6174 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6175 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6176 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6177 	else
   6178 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6179 
   6180 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6181 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6182 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6183 		aprint_error_dev(sc->sc_dev,
   6184 		    "unable to allocate TX control data, error = %d\n",
   6185 		    error);
   6186 		goto fail_0;
   6187 	}
   6188 
   6189 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6190 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6191 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6192 		aprint_error_dev(sc->sc_dev,
   6193 		    "unable to map TX control data, error = %d\n", error);
   6194 		goto fail_1;
   6195 	}
   6196 
   6197 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6198 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6199 		aprint_error_dev(sc->sc_dev,
   6200 		    "unable to create TX control data DMA map, error = %d\n",
   6201 		    error);
   6202 		goto fail_2;
   6203 	}
   6204 
   6205 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6206 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6207 		aprint_error_dev(sc->sc_dev,
   6208 		    "unable to load TX control data DMA map, error = %d\n",
   6209 		    error);
   6210 		goto fail_3;
   6211 	}
   6212 
   6213 	return 0;
   6214 
   6215  fail_3:
   6216 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6217  fail_2:
   6218 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6219 	    WM_TXDESCS_SIZE(txq));
   6220  fail_1:
   6221 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6222  fail_0:
   6223 	return error;
   6224 }
   6225 
   6226 static void
   6227 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6228 {
   6229 
   6230 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6231 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6232 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6233 	    WM_TXDESCS_SIZE(txq));
   6234 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6235 }
   6236 
   6237 static int
   6238 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6239 {
   6240 	int error;
   6241 	size_t rxq_descs_size;
   6242 
   6243 	/*
   6244 	 * Allocate the control data structures, and create and load the
    6245 	 * DMA map for them.
   6246 	 *
   6247 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6248 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6249 	 * both sets within the same 4G segment.
   6250 	 */
   6251 	rxq->rxq_ndesc = WM_NRXDESC;
   6252 	if (sc->sc_type == WM_T_82574)
   6253 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6254 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6255 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6256 	else
   6257 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6258 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6259 
   6260 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6261 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6262 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6263 		aprint_error_dev(sc->sc_dev,
   6264 		    "unable to allocate RX control data, error = %d\n",
   6265 		    error);
   6266 		goto fail_0;
   6267 	}
   6268 
   6269 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6270 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6271 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6272 		aprint_error_dev(sc->sc_dev,
   6273 		    "unable to map RX control data, error = %d\n", error);
   6274 		goto fail_1;
   6275 	}
   6276 
   6277 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6278 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6279 		aprint_error_dev(sc->sc_dev,
   6280 		    "unable to create RX control data DMA map, error = %d\n",
   6281 		    error);
   6282 		goto fail_2;
   6283 	}
   6284 
   6285 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6286 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6287 		aprint_error_dev(sc->sc_dev,
   6288 		    "unable to load RX control data DMA map, error = %d\n",
   6289 		    error);
   6290 		goto fail_3;
   6291 	}
   6292 
   6293 	return 0;
   6294 
   6295  fail_3:
   6296 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6297  fail_2:
   6298 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6299 	    rxq_descs_size);
   6300  fail_1:
   6301 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6302  fail_0:
   6303 	return error;
   6304 }
   6305 
   6306 static void
   6307 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6308 {
   6309 
   6310 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6311 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6312 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6313 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6314 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6315 }
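
/*
 * Note that wm_free_tx_descs() and wm_free_rx_descs() release their
 * resources in exactly the reverse of the order in which they were
 * acquired: unload the map, destroy it, unmap the memory, then free
 * it.  Keeping teardown the mirror image of setup is what lets the
 * alloc routines reuse the same sequence in their fail_* unwind paths.
 */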
   6316 
   6318 static int
   6319 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6320 {
   6321 	int i, error;
   6322 
   6323 	/* Create the transmit buffer DMA maps. */
   6324 	WM_TXQUEUELEN(txq) =
   6325 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6326 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6327 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6328 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6329 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6330 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6331 			aprint_error_dev(sc->sc_dev,
   6332 			    "unable to create Tx DMA map %d, error = %d\n",
   6333 			    i, error);
   6334 			goto fail;
   6335 		}
   6336 	}
   6337 
   6338 	return 0;
   6339 
   6340  fail:
   6341 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6342 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6343 			bus_dmamap_destroy(sc->sc_dmat,
   6344 			    txq->txq_soft[i].txs_dmamap);
   6345 	}
   6346 	return error;
   6347 }
   6348 
   6349 static void
   6350 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6351 {
   6352 	int i;
   6353 
   6354 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6355 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6356 			bus_dmamap_destroy(sc->sc_dmat,
   6357 			    txq->txq_soft[i].txs_dmamap);
   6358 	}
   6359 }
   6360 
   6361 static int
   6362 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6363 {
   6364 	int i, error;
   6365 
   6366 	/* Create the receive buffer DMA maps. */
   6367 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6368 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6369 			    MCLBYTES, 0, 0,
   6370 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6371 			aprint_error_dev(sc->sc_dev,
   6372 			    "unable to create Rx DMA map %d error = %d\n",
   6373 			    i, error);
   6374 			goto fail;
   6375 		}
   6376 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6377 	}
   6378 
   6379 	return 0;
   6380 
   6381  fail:
   6382 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6383 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6384 			bus_dmamap_destroy(sc->sc_dmat,
   6385 			    rxq->rxq_soft[i].rxs_dmamap);
   6386 	}
   6387 	return error;
   6388 }
   6389 
   6390 static void
   6391 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6392 {
   6393 	int i;
   6394 
   6395 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6396 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6397 			bus_dmamap_destroy(sc->sc_dmat,
   6398 			    rxq->rxq_soft[i].rxs_dmamap);
   6399 	}
   6400 }
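
/*
 * The receive maps above are created with a single segment of at most
 * MCLBYTES because each received frame lands in one mbuf cluster; the
 * transmit maps, by contrast, allow up to WM_NTXSEGS segments since an
 * outgoing packet may arrive as a chain of many mbufs.
 */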
   6401 
   6402 /*
    6403  * wm_alloc_txrx_queues:
   6404  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6405  */
   6406 static int
   6407 wm_alloc_txrx_queues(struct wm_softc *sc)
   6408 {
   6409 	int i, error, tx_done, rx_done;
   6410 
   6411 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6412 	    KM_SLEEP);
   6413 	if (sc->sc_queue == NULL) {
    6414 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6415 		error = ENOMEM;
   6416 		goto fail_0;
   6417 	}
   6418 
   6419 	/*
   6420 	 * For transmission
   6421 	 */
   6422 	error = 0;
   6423 	tx_done = 0;
   6424 	for (i = 0; i < sc->sc_nqueues; i++) {
   6425 #ifdef WM_EVENT_COUNTERS
   6426 		int j;
   6427 		const char *xname;
   6428 #endif
   6429 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6430 		txq->txq_sc = sc;
   6431 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6432 
   6433 		error = wm_alloc_tx_descs(sc, txq);
   6434 		if (error)
   6435 			break;
   6436 		error = wm_alloc_tx_buffer(sc, txq);
   6437 		if (error) {
   6438 			wm_free_tx_descs(sc, txq);
   6439 			break;
   6440 		}
   6441 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6442 		if (txq->txq_interq == NULL) {
   6443 			wm_free_tx_descs(sc, txq);
   6444 			wm_free_tx_buffer(sc, txq);
   6445 			error = ENOMEM;
   6446 			break;
   6447 		}
   6448 
   6449 #ifdef WM_EVENT_COUNTERS
   6450 		xname = device_xname(sc->sc_dev);
   6451 
   6452 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6453 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6454 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6455 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6456 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6457 
   6458 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6459 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6460 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6461 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6462 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6463 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6464 
   6465 		for (j = 0; j < WM_NTXSEGS; j++) {
   6466 			snprintf(txq->txq_txseg_evcnt_names[j],
   6467 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6468 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6469 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6470 		}
   6471 
   6472 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6473 
   6474 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6475 #endif /* WM_EVENT_COUNTERS */
   6476 
   6477 		tx_done++;
   6478 	}
   6479 	if (error)
   6480 		goto fail_1;
   6481 
   6482 	/*
    6483 	 * For receive
   6484 	 */
   6485 	error = 0;
   6486 	rx_done = 0;
   6487 	for (i = 0; i < sc->sc_nqueues; i++) {
   6488 #ifdef WM_EVENT_COUNTERS
   6489 		const char *xname;
   6490 #endif
   6491 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6492 		rxq->rxq_sc = sc;
   6493 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6494 
   6495 		error = wm_alloc_rx_descs(sc, rxq);
   6496 		if (error)
   6497 			break;
   6498 
   6499 		error = wm_alloc_rx_buffer(sc, rxq);
   6500 		if (error) {
   6501 			wm_free_rx_descs(sc, rxq);
   6502 			break;
   6503 		}
   6504 
   6505 #ifdef WM_EVENT_COUNTERS
   6506 		xname = device_xname(sc->sc_dev);
   6507 
   6508 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6509 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxdefer, rxq, i, xname);
   6510 
   6511 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6512 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6513 #endif /* WM_EVENT_COUNTERS */
   6514 
   6515 		rx_done++;
   6516 	}
   6517 	if (error)
   6518 		goto fail_2;
   6519 
   6520 	return 0;
   6521 
   6522  fail_2:
   6523 	for (i = 0; i < rx_done; i++) {
   6524 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6525 		wm_free_rx_buffer(sc, rxq);
   6526 		wm_free_rx_descs(sc, rxq);
   6527 		if (rxq->rxq_lock)
   6528 			mutex_obj_free(rxq->rxq_lock);
   6529 	}
   6530  fail_1:
   6531 	for (i = 0; i < tx_done; i++) {
   6532 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6533 		pcq_destroy(txq->txq_interq);
   6534 		wm_free_tx_buffer(sc, txq);
   6535 		wm_free_tx_descs(sc, txq);
   6536 		if (txq->txq_lock)
   6537 			mutex_obj_free(txq->txq_lock);
   6538 	}
   6539 
   6540 	kmem_free(sc->sc_queue,
   6541 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6542  fail_0:
   6543 	return error;
   6544 }
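
/*
 * The tx_done/rx_done counters above implement a partial-unwind error
 * path: each loop records how many queues were fully set up, so the
 * fail_1/fail_2 labels free only what was actually allocated, in
 * reverse order, instead of touching uninitialized entries.
 */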
   6545 
   6546 /*
    6547  * wm_free_txrx_queues:
   6548  *	Free {tx,rx}descs and {tx,rx} buffers
   6549  */
   6550 static void
   6551 wm_free_txrx_queues(struct wm_softc *sc)
   6552 {
   6553 	int i;
   6554 
   6555 	for (i = 0; i < sc->sc_nqueues; i++) {
   6556 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6557 
   6558 #ifdef WM_EVENT_COUNTERS
   6559 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6560 		WM_Q_EVCNT_DETACH(rxq, rxdefer, rxq, i);
   6561 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6562 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6563 #endif /* WM_EVENT_COUNTERS */
   6564 
   6565 		wm_free_rx_buffer(sc, rxq);
   6566 		wm_free_rx_descs(sc, rxq);
   6567 		if (rxq->rxq_lock)
   6568 			mutex_obj_free(rxq->rxq_lock);
   6569 	}
   6570 
   6571 	for (i = 0; i < sc->sc_nqueues; i++) {
   6572 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6573 		struct mbuf *m;
   6574 #ifdef WM_EVENT_COUNTERS
   6575 		int j;
   6576 
   6577 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6578 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6579 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6580 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6581 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6582 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6583 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6584 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6585 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6586 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6587 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6588 
   6589 		for (j = 0; j < WM_NTXSEGS; j++)
   6590 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6591 
   6592 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6593 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6594 #endif /* WM_EVENT_COUNTERS */
   6595 
   6596 		/* drain txq_interq */
   6597 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6598 			m_freem(m);
   6599 		pcq_destroy(txq->txq_interq);
   6600 
   6601 		wm_free_tx_buffer(sc, txq);
   6602 		wm_free_tx_descs(sc, txq);
   6603 		if (txq->txq_lock)
   6604 			mutex_obj_free(txq->txq_lock);
   6605 	}
   6606 
   6607 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6608 }
   6609 
   6610 static void
   6611 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6612 {
   6613 
   6614 	KASSERT(mutex_owned(txq->txq_lock));
   6615 
   6616 	/* Initialize the transmit descriptor ring. */
   6617 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6618 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6619 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6620 	txq->txq_free = WM_NTXDESC(txq);
   6621 	txq->txq_next = 0;
   6622 }
   6623 
   6624 static void
   6625 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6626     struct wm_txqueue *txq)
   6627 {
   6628 
   6629 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6630 		device_xname(sc->sc_dev), __func__));
   6631 	KASSERT(mutex_owned(txq->txq_lock));
   6632 
   6633 	if (sc->sc_type < WM_T_82543) {
   6634 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6635 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6636 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6637 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6638 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6639 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6640 	} else {
   6641 		int qid = wmq->wmq_id;
   6642 
   6643 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6644 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6645 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6646 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6647 
   6648 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6649 			/*
   6650 			 * Don't write TDT before TCTL.EN is set.
    6651 			 * See the datasheet.
   6652 			 */
   6653 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6654 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6655 			    | TXDCTL_WTHRESH(0));
   6656 		else {
   6657 			/* XXX should update with AIM? */
   6658 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6659 			if (sc->sc_type >= WM_T_82540) {
    6660 				/* Should be the same as TIDV. */
   6661 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6662 			}
   6663 
   6664 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6665 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6666 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6667 		}
   6668 	}
   6669 }
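
/*
 * Two details of the register setup above are worth noting.  On
 * NEWQUEUE (82575 and later) controllers TDT is deliberately left
 * alone here, since the tail register must not be written until
 * TCTL.EN has enabled the transmitter.  The wmq_itr / 4 scaling for
 * TIDV/TADV is, as far as we understand it, a unit conversion: the
 * ITR value is kept in 256 ns units while these delay timers count in
 * 1.024 us units, and 1024 / 256 = 4.
 */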
   6670 
   6671 static void
   6672 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6673 {
   6674 	int i;
   6675 
   6676 	KASSERT(mutex_owned(txq->txq_lock));
   6677 
   6678 	/* Initialize the transmit job descriptors. */
   6679 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6680 		txq->txq_soft[i].txs_mbuf = NULL;
   6681 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6682 	txq->txq_snext = 0;
   6683 	txq->txq_sdirty = 0;
   6684 }
   6685 
   6686 static void
   6687 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6688     struct wm_txqueue *txq)
   6689 {
   6690 
   6691 	KASSERT(mutex_owned(txq->txq_lock));
   6692 
   6693 	/*
   6694 	 * Set up some register offsets that are different between
   6695 	 * the i82542 and the i82543 and later chips.
   6696 	 */
   6697 	if (sc->sc_type < WM_T_82543)
   6698 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6699 	else
   6700 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6701 
   6702 	wm_init_tx_descs(sc, txq);
   6703 	wm_init_tx_regs(sc, wmq, txq);
   6704 	wm_init_tx_buffer(sc, txq);
   6705 
   6706 	txq->txq_sending = false;
   6707 }
   6708 
   6709 static void
   6710 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6711     struct wm_rxqueue *rxq)
   6712 {
   6713 
   6714 	KASSERT(mutex_owned(rxq->rxq_lock));
   6715 
   6716 	/*
   6717 	 * Initialize the receive descriptor and receive job
   6718 	 * descriptor rings.
   6719 	 */
   6720 	if (sc->sc_type < WM_T_82543) {
   6721 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6722 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6723 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6724 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6725 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6726 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6727 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6728 
   6729 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6730 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6731 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6732 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6733 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6734 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6735 	} else {
   6736 		int qid = wmq->wmq_id;
   6737 
   6738 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6739 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6740 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6741 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6742 
   6743 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6744 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6745 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6746 
    6747 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6748 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6749 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6750 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6751 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6752 			    | RXDCTL_WTHRESH(1));
   6753 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6754 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6755 		} else {
   6756 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6757 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6758 			/* XXX should update with AIM? */
   6759 			CSR_WRITE(sc, WMREG_RDTR,
   6760 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    6761 			/* MUST be the same as RDTR. */
   6762 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6763 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6764 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6765 		}
   6766 	}
   6767 }
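
/*
 * The SRRCTL write above encodes the receive buffer size in units of
 * 1 << SRRCTL_BSIZEPKT_SHIFT bytes.  With the typical MCLBYTES of 2048
 * and a shift of 10 (both assumptions; see the register header for the
 * real values), MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT = 2, i.e. a 2KB
 * packet buffer.  The panic() above guards exactly this encoding:
 * MCLBYTES must be a multiple of 1 << SRRCTL_BSIZEPKT_SHIFT, or the
 * shift would silently drop the remainder.
 */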
   6768 
   6769 static int
   6770 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6771 {
   6772 	struct wm_rxsoft *rxs;
   6773 	int error, i;
   6774 
   6775 	KASSERT(mutex_owned(rxq->rxq_lock));
   6776 
   6777 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6778 		rxs = &rxq->rxq_soft[i];
   6779 		if (rxs->rxs_mbuf == NULL) {
   6780 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6781 				log(LOG_ERR, "%s: unable to allocate or map "
   6782 				    "rx buffer %d, error = %d\n",
   6783 				    device_xname(sc->sc_dev), i, error);
   6784 				/*
   6785 				 * XXX Should attempt to run with fewer receive
   6786 				 * XXX buffers instead of just failing.
   6787 				 */
   6788 				wm_rxdrain(rxq);
   6789 				return ENOMEM;
   6790 			}
   6791 		} else {
   6792 			/*
   6793 			 * For 82575 and 82576, the RX descriptors must be
   6794 			 * initialized after the setting of RCTL.EN in
   6795 			 * wm_set_filter()
   6796 			 */
   6797 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6798 				wm_init_rxdesc(rxq, i);
   6799 		}
   6800 	}
   6801 	rxq->rxq_ptr = 0;
   6802 	rxq->rxq_discard = 0;
   6803 	WM_RXCHAIN_RESET(rxq);
   6804 
   6805 	return 0;
   6806 }
   6807 
   6808 static int
   6809 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6810     struct wm_rxqueue *rxq)
   6811 {
   6812 
   6813 	KASSERT(mutex_owned(rxq->rxq_lock));
   6814 
   6815 	/*
   6816 	 * Set up some register offsets that are different between
   6817 	 * the i82542 and the i82543 and later chips.
   6818 	 */
   6819 	if (sc->sc_type < WM_T_82543)
   6820 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6821 	else
   6822 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6823 
   6824 	wm_init_rx_regs(sc, wmq, rxq);
   6825 	return wm_init_rx_buffer(sc, rxq);
   6826 }
   6827 
   6828 /*
    6829  * wm_init_txrx_queues:
   6830  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6831  */
   6832 static int
   6833 wm_init_txrx_queues(struct wm_softc *sc)
   6834 {
   6835 	int i, error = 0;
   6836 
   6837 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6838 		device_xname(sc->sc_dev), __func__));
   6839 
   6840 	for (i = 0; i < sc->sc_nqueues; i++) {
   6841 		struct wm_queue *wmq = &sc->sc_queue[i];
   6842 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6843 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6844 
   6845 		/*
    6846 		 * TODO
    6847 		 * Currently, a constant value is used instead of AIM
    6848 		 * (adaptive interrupt moderation).  Furthermore, the interrupt
    6849 		 * interval used for multiqueue (polling mode) is less than the
    6850 		 * default value.  More tuning, and AIM itself, are required.
   6851 		 */
   6852 		if (wm_is_using_multiqueue(sc))
   6853 			wmq->wmq_itr = 50;
   6854 		else
   6855 			wmq->wmq_itr = sc->sc_itr_init;
   6856 		wmq->wmq_set_itr = true;
   6857 
   6858 		mutex_enter(txq->txq_lock);
   6859 		wm_init_tx_queue(sc, wmq, txq);
   6860 		mutex_exit(txq->txq_lock);
   6861 
   6862 		mutex_enter(rxq->rxq_lock);
   6863 		error = wm_init_rx_queue(sc, wmq, rxq);
   6864 		mutex_exit(rxq->rxq_lock);
   6865 		if (error)
   6866 			break;
   6867 	}
   6868 
   6869 	return error;
   6870 }
   6871 
   6872 /*
   6873  * wm_tx_offload:
   6874  *
   6875  *	Set up TCP/IP checksumming parameters for the
   6876  *	specified packet.
   6877  */
   6878 static int
   6879 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6880     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6881 {
   6882 	struct mbuf *m0 = txs->txs_mbuf;
   6883 	struct livengood_tcpip_ctxdesc *t;
   6884 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6885 	uint32_t ipcse;
   6886 	struct ether_header *eh;
   6887 	int offset, iphl;
   6888 	uint8_t fields;
   6889 
   6890 	/*
   6891 	 * XXX It would be nice if the mbuf pkthdr had offset
   6892 	 * fields for the protocol headers.
   6893 	 */
   6894 
   6895 	eh = mtod(m0, struct ether_header *);
   6896 	switch (htons(eh->ether_type)) {
   6897 	case ETHERTYPE_IP:
   6898 	case ETHERTYPE_IPV6:
   6899 		offset = ETHER_HDR_LEN;
   6900 		break;
   6901 
   6902 	case ETHERTYPE_VLAN:
   6903 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6904 		break;
   6905 
   6906 	default:
   6907 		/*
   6908 		 * Don't support this protocol or encapsulation.
   6909 		 */
   6910 		*fieldsp = 0;
   6911 		*cmdp = 0;
   6912 		return 0;
   6913 	}
   6914 
   6915 	if ((m0->m_pkthdr.csum_flags &
   6916 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6917 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6918 	} else {
   6919 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6920 	}
   6921 	ipcse = offset + iphl - 1;
   6922 
   6923 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6924 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6925 	seg = 0;
   6926 	fields = 0;
   6927 
   6928 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6929 		int hlen = offset + iphl;
   6930 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6931 
   6932 		if (__predict_false(m0->m_len <
   6933 				    (hlen + sizeof(struct tcphdr)))) {
   6934 			/*
   6935 			 * TCP/IP headers are not in the first mbuf; we need
   6936 			 * to do this the slow and painful way.  Let's just
   6937 			 * hope this doesn't happen very often.
   6938 			 */
   6939 			struct tcphdr th;
   6940 
   6941 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6942 
   6943 			m_copydata(m0, hlen, sizeof(th), &th);
   6944 			if (v4) {
   6945 				struct ip ip;
   6946 
   6947 				m_copydata(m0, offset, sizeof(ip), &ip);
   6948 				ip.ip_len = 0;
   6949 				m_copyback(m0,
   6950 				    offset + offsetof(struct ip, ip_len),
   6951 				    sizeof(ip.ip_len), &ip.ip_len);
   6952 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6953 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6954 			} else {
   6955 				struct ip6_hdr ip6;
   6956 
   6957 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6958 				ip6.ip6_plen = 0;
   6959 				m_copyback(m0,
   6960 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6961 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6962 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6963 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6964 			}
   6965 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6966 			    sizeof(th.th_sum), &th.th_sum);
   6967 
   6968 			hlen += th.th_off << 2;
   6969 		} else {
   6970 			/*
   6971 			 * TCP/IP headers are in the first mbuf; we can do
   6972 			 * this the easy way.
   6973 			 */
   6974 			struct tcphdr *th;
   6975 
   6976 			if (v4) {
   6977 				struct ip *ip =
   6978 				    (void *)(mtod(m0, char *) + offset);
   6979 				th = (void *)(mtod(m0, char *) + hlen);
   6980 
   6981 				ip->ip_len = 0;
   6982 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6983 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6984 			} else {
   6985 				struct ip6_hdr *ip6 =
   6986 				    (void *)(mtod(m0, char *) + offset);
   6987 				th = (void *)(mtod(m0, char *) + hlen);
   6988 
   6989 				ip6->ip6_plen = 0;
   6990 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6991 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6992 			}
   6993 			hlen += th->th_off << 2;
   6994 		}
   6995 
   6996 		if (v4) {
   6997 			WM_Q_EVCNT_INCR(txq, txtso);
   6998 			cmdlen |= WTX_TCPIP_CMD_IP;
   6999 		} else {
   7000 			WM_Q_EVCNT_INCR(txq, txtso6);
   7001 			ipcse = 0;
   7002 		}
   7003 		cmd |= WTX_TCPIP_CMD_TSE;
   7004 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7005 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7006 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7007 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7008 	}
   7009 
   7010 	/*
   7011 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7012 	 * offload feature, if we load the context descriptor, we
   7013 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7014 	 */
   7015 
   7016 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7017 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7018 	    WTX_TCPIP_IPCSE(ipcse);
   7019 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7020 		WM_Q_EVCNT_INCR(txq, txipsum);
   7021 		fields |= WTX_IXSM;
   7022 	}
   7023 
   7024 	offset += iphl;
   7025 
   7026 	if (m0->m_pkthdr.csum_flags &
   7027 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7028 		WM_Q_EVCNT_INCR(txq, txtusum);
   7029 		fields |= WTX_TXSM;
   7030 		tucs = WTX_TCPIP_TUCSS(offset) |
   7031 		    WTX_TCPIP_TUCSO(offset +
   7032 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7033 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7034 	} else if ((m0->m_pkthdr.csum_flags &
   7035 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7036 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7037 		fields |= WTX_TXSM;
   7038 		tucs = WTX_TCPIP_TUCSS(offset) |
   7039 		    WTX_TCPIP_TUCSO(offset +
   7040 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7041 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7042 	} else {
   7043 		/* Just initialize it to a valid TCP context. */
   7044 		tucs = WTX_TCPIP_TUCSS(offset) |
   7045 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7046 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7047 	}
   7048 
   7049 	/*
    7050 	 * We don't have to write a context descriptor for every packet,
    7051 	 * except on the 82574: there, a context descriptor must be written
    7052 	 * for every packet when two descriptor queues are in use.
    7053 	 * Writing a context descriptor for every packet adds overhead,
    7054 	 * but it does not cause problems.
   7055 	 */
   7056 	/* Fill in the context descriptor. */
   7057 	t = (struct livengood_tcpip_ctxdesc *)
   7058 	    &txq->txq_descs[txq->txq_next];
   7059 	t->tcpip_ipcs = htole32(ipcs);
   7060 	t->tcpip_tucs = htole32(tucs);
   7061 	t->tcpip_cmdlen = htole32(cmdlen);
   7062 	t->tcpip_seg = htole32(seg);
   7063 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7064 
   7065 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7066 	txs->txs_ndesc++;
   7067 
   7068 	*cmdp = cmd;
   7069 	*fieldsp = fields;
   7070 
   7071 	return 0;
   7072 }
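
/*
 * A worked example of the checksum-context arithmetic above, for a
 * plain (non-VLAN) IPv4/TCP packet with a 20-byte IP header:
 * offset = ETHER_HDR_LEN = 14 and iphl = 20, so IPCSS = 14 (start of
 * the IP header), IPCSO = 14 + offsetof(struct ip, ip_sum) = 24
 * (where the hardware stores the IP checksum) and
 * IPCSE = 14 + 20 - 1 = 33 (last byte covered).  After offset += iphl,
 * TUCSS = 34 and, for TCP, TUCSO = 34 + 16 = 50, with TUCSE = 0
 * meaning "checksum to the end of the packet".
 */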
   7073 
   7074 static inline int
   7075 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7076 {
   7077 	struct wm_softc *sc = ifp->if_softc;
   7078 	u_int cpuid = cpu_index(curcpu());
   7079 
   7080 	/*
    7081 	 * Currently, a simple distribution strategy based on the CPU index.
    7082 	 * TODO:
    7083 	 * Distribute by flow ID (the RSS hash value).
   7084 	 */
    7085 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7086 }
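
/*
 * For example, with ncpu = 8, sc_affinity_offset = 2 and
 * sc_nqueues = 4, a thread on CPU 5 maps to queue (5 + 8 - 2) % 4 = 3.
 * Adding ncpu before subtracting the offset keeps the unsigned
 * arithmetic from wrapping when the offset exceeds the CPU index.
 */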
   7087 
   7088 /*
   7089  * wm_start:		[ifnet interface function]
   7090  *
   7091  *	Start packet transmission on the interface.
   7092  */
   7093 static void
   7094 wm_start(struct ifnet *ifp)
   7095 {
   7096 	struct wm_softc *sc = ifp->if_softc;
   7097 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7098 
   7099 #ifdef WM_MPSAFE
   7100 	KASSERT(if_is_mpsafe(ifp));
   7101 #endif
   7102 	/*
   7103 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7104 	 */
   7105 
   7106 	mutex_enter(txq->txq_lock);
   7107 	if (!txq->txq_stopping)
   7108 		wm_start_locked(ifp);
   7109 	mutex_exit(txq->txq_lock);
   7110 }
   7111 
   7112 static void
   7113 wm_start_locked(struct ifnet *ifp)
   7114 {
   7115 	struct wm_softc *sc = ifp->if_softc;
   7116 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7117 
   7118 	wm_send_common_locked(ifp, txq, false);
   7119 }
   7120 
   7121 static int
   7122 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7123 {
   7124 	int qid;
   7125 	struct wm_softc *sc = ifp->if_softc;
   7126 	struct wm_txqueue *txq;
   7127 
   7128 	qid = wm_select_txqueue(ifp, m);
   7129 	txq = &sc->sc_queue[qid].wmq_txq;
   7130 
   7131 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7132 		m_freem(m);
   7133 		WM_Q_EVCNT_INCR(txq, txdrop);
   7134 		return ENOBUFS;
   7135 	}
   7136 
   7137 	/*
   7138 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7139 	 */
   7140 	ifp->if_obytes += m->m_pkthdr.len;
   7141 	if (m->m_flags & M_MCAST)
   7142 		ifp->if_omcasts++;
   7143 
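	/*
	 * If the tryenter below fails, the packet just queued to
	 * txq_interq is not lost: it will be dequeued by the deferred
	 * start path instead.  See the longer analysis of the same
	 * pattern in wm_nq_transmit() below.
	 */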
   7144 	if (mutex_tryenter(txq->txq_lock)) {
   7145 		if (!txq->txq_stopping)
   7146 			wm_transmit_locked(ifp, txq);
   7147 		mutex_exit(txq->txq_lock);
   7148 	}
   7149 
   7150 	return 0;
   7151 }
   7152 
   7153 static void
   7154 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7155 {
   7156 
   7157 	wm_send_common_locked(ifp, txq, true);
   7158 }
   7159 
   7160 static void
   7161 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7162     bool is_transmit)
   7163 {
   7164 	struct wm_softc *sc = ifp->if_softc;
   7165 	struct mbuf *m0;
   7166 	struct wm_txsoft *txs;
   7167 	bus_dmamap_t dmamap;
   7168 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7169 	bus_addr_t curaddr;
   7170 	bus_size_t seglen, curlen;
   7171 	uint32_t cksumcmd;
   7172 	uint8_t cksumfields;
   7173 
   7174 	KASSERT(mutex_owned(txq->txq_lock));
   7175 
   7176 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7177 		return;
   7178 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7179 		return;
   7180 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7181 		return;
   7182 
   7183 	/* Remember the previous number of free descriptors. */
   7184 	ofree = txq->txq_free;
   7185 
   7186 	/*
   7187 	 * Loop through the send queue, setting up transmit descriptors
   7188 	 * until we drain the queue, or use up all available transmit
   7189 	 * descriptors.
   7190 	 */
   7191 	for (;;) {
   7192 		m0 = NULL;
   7193 
   7194 		/* Get a work queue entry. */
   7195 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7196 			wm_txeof(txq, UINT_MAX);
   7197 			if (txq->txq_sfree == 0) {
   7198 				DPRINTF(WM_DEBUG_TX,
   7199 				    ("%s: TX: no free job descriptors\n",
   7200 					device_xname(sc->sc_dev)));
   7201 				WM_Q_EVCNT_INCR(txq, txsstall);
   7202 				break;
   7203 			}
   7204 		}
   7205 
   7206 		/* Grab a packet off the queue. */
   7207 		if (is_transmit)
   7208 			m0 = pcq_get(txq->txq_interq);
   7209 		else
   7210 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7211 		if (m0 == NULL)
   7212 			break;
   7213 
   7214 		DPRINTF(WM_DEBUG_TX,
   7215 		    ("%s: TX: have packet to transmit: %p\n",
   7216 		    device_xname(sc->sc_dev), m0));
   7217 
   7218 		txs = &txq->txq_soft[txq->txq_snext];
   7219 		dmamap = txs->txs_dmamap;
   7220 
   7221 		use_tso = (m0->m_pkthdr.csum_flags &
   7222 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7223 
   7224 		/*
   7225 		 * So says the Linux driver:
   7226 		 * The controller does a simple calculation to make sure
   7227 		 * there is enough room in the FIFO before initiating the
   7228 		 * DMA for each buffer.  The calc is:
   7229 		 *	4 = ceil(buffer len / MSS)
   7230 		 * To make sure we don't overrun the FIFO, adjust the max
   7231 		 * buffer len if the MSS drops.
   7232 		 */
   7233 		dmamap->dm_maxsegsz =
   7234 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7235 		    ? m0->m_pkthdr.segsz << 2
   7236 		    : WTX_MAX_LEN;
   7237 
   7238 		/*
   7239 		 * Load the DMA map.  If this fails, the packet either
   7240 		 * didn't fit in the allotted number of segments, or we
   7241 		 * were short on resources.  For the too-many-segments
   7242 		 * case, we simply report an error and drop the packet,
   7243 		 * since we can't sanely copy a jumbo packet to a single
   7244 		 * buffer.
   7245 		 */
   7246 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7247 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7248 		if (error) {
   7249 			if (error == EFBIG) {
   7250 				WM_Q_EVCNT_INCR(txq, txdrop);
   7251 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7252 				    "DMA segments, dropping...\n",
   7253 				    device_xname(sc->sc_dev));
   7254 				wm_dump_mbuf_chain(sc, m0);
   7255 				m_freem(m0);
   7256 				continue;
   7257 			}
    7258 			/* Short on resources, just stop for now. */
   7259 			DPRINTF(WM_DEBUG_TX,
   7260 			    ("%s: TX: dmamap load failed: %d\n",
   7261 			    device_xname(sc->sc_dev), error));
   7262 			break;
   7263 		}
   7264 
   7265 		segs_needed = dmamap->dm_nsegs;
   7266 		if (use_tso) {
   7267 			/* For sentinel descriptor; see below. */
   7268 			segs_needed++;
   7269 		}
   7270 
   7271 		/*
   7272 		 * Ensure we have enough descriptors free to describe
   7273 		 * the packet.  Note, we always reserve one descriptor
   7274 		 * at the end of the ring due to the semantics of the
   7275 		 * TDT register, plus one more in the event we need
   7276 		 * to load offload context.
   7277 		 */
   7278 		if (segs_needed > txq->txq_free - 2) {
   7279 			/*
   7280 			 * Not enough free descriptors to transmit this
   7281 			 * packet.  We haven't committed anything yet,
   7282 			 * so just unload the DMA map, put the packet
    7283 			 * back on the queue, and punt.  Notify the upper
   7284 			 * layer that there are no more slots left.
   7285 			 */
   7286 			DPRINTF(WM_DEBUG_TX,
   7287 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7288 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7289 			    segs_needed, txq->txq_free - 1));
   7290 			if (!is_transmit)
   7291 				ifp->if_flags |= IFF_OACTIVE;
   7292 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7293 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7294 			WM_Q_EVCNT_INCR(txq, txdstall);
   7295 			break;
   7296 		}
   7297 
   7298 		/*
   7299 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7300 		 * once we know we can transmit the packet, since we
   7301 		 * do some internal FIFO space accounting here.
   7302 		 */
   7303 		if (sc->sc_type == WM_T_82547 &&
   7304 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7305 			DPRINTF(WM_DEBUG_TX,
   7306 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7307 			    device_xname(sc->sc_dev)));
   7308 			if (!is_transmit)
   7309 				ifp->if_flags |= IFF_OACTIVE;
   7310 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7311 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7312 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7313 			break;
   7314 		}
   7315 
   7316 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7317 
   7318 		DPRINTF(WM_DEBUG_TX,
   7319 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7320 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7321 
   7322 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7323 
   7324 		/*
   7325 		 * Store a pointer to the packet so that we can free it
   7326 		 * later.
   7327 		 *
   7328 		 * Initially, we consider the number of descriptors the
   7329 		 * packet uses the number of DMA segments.  This may be
   7330 		 * incremented by 1 if we do checksum offload (a descriptor
   7331 		 * is used to set the checksum context).
   7332 		 */
   7333 		txs->txs_mbuf = m0;
   7334 		txs->txs_firstdesc = txq->txq_next;
   7335 		txs->txs_ndesc = segs_needed;
   7336 
   7337 		/* Set up offload parameters for this packet. */
   7338 		if (m0->m_pkthdr.csum_flags &
   7339 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7340 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7341 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7342 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7343 					  &cksumfields) != 0) {
   7344 				/* Error message already displayed. */
   7345 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7346 				continue;
   7347 			}
   7348 		} else {
   7349 			cksumcmd = 0;
   7350 			cksumfields = 0;
   7351 		}
   7352 
   7353 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7354 
   7355 		/* Sync the DMA map. */
   7356 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7357 		    BUS_DMASYNC_PREWRITE);
   7358 
   7359 		/* Initialize the transmit descriptor. */
   7360 		for (nexttx = txq->txq_next, seg = 0;
   7361 		     seg < dmamap->dm_nsegs; seg++) {
   7362 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7363 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7364 			     seglen != 0;
   7365 			     curaddr += curlen, seglen -= curlen,
   7366 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7367 				curlen = seglen;
   7368 
   7369 				/*
   7370 				 * So says the Linux driver:
   7371 				 * Work around for premature descriptor
   7372 				 * write-backs in TSO mode.  Append a
   7373 				 * 4-byte sentinel descriptor.
   7374 				 */
   7375 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7376 				    curlen > 8)
   7377 					curlen -= 4;
   7378 
   7379 				wm_set_dma_addr(
   7380 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7381 				txq->txq_descs[nexttx].wtx_cmdlen
   7382 				    = htole32(cksumcmd | curlen);
   7383 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7384 				    = 0;
   7385 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7386 				    = cksumfields;
    7387 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7388 				lasttx = nexttx;
   7389 
   7390 				DPRINTF(WM_DEBUG_TX,
   7391 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7392 				     "len %#04zx\n",
   7393 				    device_xname(sc->sc_dev), nexttx,
   7394 				    (uint64_t)curaddr, curlen));
   7395 			}
   7396 		}
   7397 
   7398 		KASSERT(lasttx != -1);
   7399 
   7400 		/*
   7401 		 * Set up the command byte on the last descriptor of
   7402 		 * the packet.  If we're in the interrupt delay window,
   7403 		 * delay the interrupt.
   7404 		 */
   7405 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7406 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7407 
   7408 		/*
   7409 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7410 		 * up the descriptor to encapsulate the packet for us.
   7411 		 *
   7412 		 * This is only valid on the last descriptor of the packet.
   7413 		 */
   7414 		if (vlan_has_tag(m0)) {
   7415 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7416 			    htole32(WTX_CMD_VLE);
   7417 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7418 			    = htole16(vlan_get_tag(m0));
   7419 		}
   7420 
   7421 		txs->txs_lastdesc = lasttx;
   7422 
   7423 		DPRINTF(WM_DEBUG_TX,
   7424 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7425 		    device_xname(sc->sc_dev),
   7426 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7427 
   7428 		/* Sync the descriptors we're using. */
   7429 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7430 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7431 
   7432 		/* Give the packet to the chip. */
   7433 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7434 
   7435 		DPRINTF(WM_DEBUG_TX,
   7436 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7437 
   7438 		DPRINTF(WM_DEBUG_TX,
   7439 		    ("%s: TX: finished transmitting packet, job %d\n",
   7440 		    device_xname(sc->sc_dev), txq->txq_snext));
   7441 
   7442 		/* Advance the tx pointer. */
   7443 		txq->txq_free -= txs->txs_ndesc;
   7444 		txq->txq_next = nexttx;
   7445 
   7446 		txq->txq_sfree--;
   7447 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7448 
   7449 		/* Pass the packet to any BPF listeners. */
   7450 		bpf_mtap(ifp, m0);
   7451 	}
   7452 
   7453 	if (m0 != NULL) {
   7454 		if (!is_transmit)
   7455 			ifp->if_flags |= IFF_OACTIVE;
   7456 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7457 		WM_Q_EVCNT_INCR(txq, txdrop);
   7458 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7459 			__func__));
   7460 		m_freem(m0);
   7461 	}
   7462 
   7463 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7464 		/* No more slots; notify upper layer. */
   7465 		if (!is_transmit)
   7466 			ifp->if_flags |= IFF_OACTIVE;
   7467 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7468 	}
   7469 
   7470 	if (txq->txq_free != ofree) {
   7471 		/* Set a watchdog timer in case the chip flakes out. */
   7472 		txq->txq_lastsent = time_uptime;
   7473 		txq->txq_sending = true;
   7474 	}
   7475 }
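
/*
 * The loop above is effectively a two-phase commit: every check that
 * can fail (job descriptors, the DMA load, the descriptor count, the
 * 82547 FIFO workaround) happens before the "WE ARE NOW COMMITTED"
 * point, so a packet is either transmitted completely or backed out
 * without the descriptor ring having been touched.  Note also that
 * IFF_OACTIVE is asserted only on the legacy wm_start() path; the
 * if_transmit() path relies on the per-queue WM_TXQ_NO_SPACE flag,
 * since one full queue must not stall the others.
 */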
   7476 
   7477 /*
   7478  * wm_nq_tx_offload:
   7479  *
   7480  *	Set up TCP/IP checksumming parameters for the
   7481  *	specified packet, for NEWQUEUE devices
   7482  */
   7483 static int
   7484 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7485     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7486 {
   7487 	struct mbuf *m0 = txs->txs_mbuf;
   7488 	uint32_t vl_len, mssidx, cmdc;
   7489 	struct ether_header *eh;
   7490 	int offset, iphl;
   7491 
   7492 	/*
   7493 	 * XXX It would be nice if the mbuf pkthdr had offset
   7494 	 * fields for the protocol headers.
   7495 	 */
   7496 	*cmdlenp = 0;
   7497 	*fieldsp = 0;
   7498 
   7499 	eh = mtod(m0, struct ether_header *);
   7500 	switch (htons(eh->ether_type)) {
   7501 	case ETHERTYPE_IP:
   7502 	case ETHERTYPE_IPV6:
   7503 		offset = ETHER_HDR_LEN;
   7504 		break;
   7505 
   7506 	case ETHERTYPE_VLAN:
   7507 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7508 		break;
   7509 
   7510 	default:
   7511 		/* Don't support this protocol or encapsulation. */
   7512 		*do_csum = false;
   7513 		return 0;
   7514 	}
   7515 	*do_csum = true;
   7516 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7517 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7518 
   7519 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7520 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7521 
   7522 	if ((m0->m_pkthdr.csum_flags &
   7523 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7524 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7525 	} else {
   7526 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7527 	}
   7528 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7529 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7530 
   7531 	if (vlan_has_tag(m0)) {
   7532 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7533 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7534 		*cmdlenp |= NQTX_CMD_VLE;
   7535 	}
   7536 
   7537 	mssidx = 0;
   7538 
   7539 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7540 		int hlen = offset + iphl;
   7541 		int tcp_hlen;
   7542 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7543 
   7544 		if (__predict_false(m0->m_len <
   7545 				    (hlen + sizeof(struct tcphdr)))) {
   7546 			/*
   7547 			 * TCP/IP headers are not in the first mbuf; we need
   7548 			 * to do this the slow and painful way.  Let's just
   7549 			 * hope this doesn't happen very often.
   7550 			 */
   7551 			struct tcphdr th;
   7552 
   7553 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7554 
   7555 			m_copydata(m0, hlen, sizeof(th), &th);
   7556 			if (v4) {
   7557 				struct ip ip;
   7558 
   7559 				m_copydata(m0, offset, sizeof(ip), &ip);
   7560 				ip.ip_len = 0;
   7561 				m_copyback(m0,
   7562 				    offset + offsetof(struct ip, ip_len),
   7563 				    sizeof(ip.ip_len), &ip.ip_len);
   7564 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7565 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7566 			} else {
   7567 				struct ip6_hdr ip6;
   7568 
   7569 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7570 				ip6.ip6_plen = 0;
   7571 				m_copyback(m0,
   7572 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7573 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7574 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7575 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7576 			}
   7577 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7578 			    sizeof(th.th_sum), &th.th_sum);
   7579 
   7580 			tcp_hlen = th.th_off << 2;
   7581 		} else {
   7582 			/*
   7583 			 * TCP/IP headers are in the first mbuf; we can do
   7584 			 * this the easy way.
   7585 			 */
   7586 			struct tcphdr *th;
   7587 
   7588 			if (v4) {
   7589 				struct ip *ip =
   7590 				    (void *)(mtod(m0, char *) + offset);
   7591 				th = (void *)(mtod(m0, char *) + hlen);
   7592 
   7593 				ip->ip_len = 0;
   7594 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7595 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7596 			} else {
   7597 				struct ip6_hdr *ip6 =
   7598 				    (void *)(mtod(m0, char *) + offset);
   7599 				th = (void *)(mtod(m0, char *) + hlen);
   7600 
   7601 				ip6->ip6_plen = 0;
   7602 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7603 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7604 			}
   7605 			tcp_hlen = th->th_off << 2;
   7606 		}
   7607 		hlen += tcp_hlen;
   7608 		*cmdlenp |= NQTX_CMD_TSE;
   7609 
   7610 		if (v4) {
   7611 			WM_Q_EVCNT_INCR(txq, txtso);
   7612 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7613 		} else {
   7614 			WM_Q_EVCNT_INCR(txq, txtso6);
   7615 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7616 		}
   7617 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7618 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7619 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7620 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7621 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7622 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7623 	} else {
   7624 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7625 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7626 	}
   7627 
   7628 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7629 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7630 		cmdc |= NQTXC_CMD_IP4;
   7631 	}
   7632 
   7633 	if (m0->m_pkthdr.csum_flags &
   7634 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7635 		WM_Q_EVCNT_INCR(txq, txtusum);
   7636 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7637 			cmdc |= NQTXC_CMD_TCP;
   7638 		} else {
   7639 			cmdc |= NQTXC_CMD_UDP;
   7640 		}
   7641 		cmdc |= NQTXC_CMD_IP4;
   7642 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7643 	}
   7644 	if (m0->m_pkthdr.csum_flags &
   7645 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7646 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7647 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7648 			cmdc |= NQTXC_CMD_TCP;
   7649 		} else {
   7650 			cmdc |= NQTXC_CMD_UDP;
   7651 		}
   7652 		cmdc |= NQTXC_CMD_IP6;
   7653 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7654 	}
   7655 
   7656 	/*
    7657 	 * We don't have to write a context descriptor for every packet on
    7658 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7659 	 * I354, I210 and I211; for these controllers it is enough to write
    7660 	 * one per Tx queue.
    7661 	 * Writing a context descriptor for every packet adds overhead,
    7662 	 * but it does not cause problems.
   7663 	 */
   7664 	/* Fill in the context descriptor. */
   7665 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7666 	    htole32(vl_len);
   7667 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7668 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7669 	    htole32(cmdc);
   7670 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7671 	    htole32(mssidx);
   7672 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7673 	DPRINTF(WM_DEBUG_TX,
   7674 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7675 	    txq->txq_next, 0, vl_len));
   7676 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7677 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7678 	txs->txs_ndesc++;
   7679 	return 0;
   7680 }
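
/*
 * A sketch of how the advanced context descriptor above is packed,
 * using the NQTXC_* shift/mask names symbolically (their numeric
 * values live in the register header): vl_len combines the MAC header
 * length (14, or 18 with a VLAN encapsulation), the IP header length
 * and the VLAN tag into one 32-bit word, while mssidx combines the
 * TSO MSS and the TCP header length.  The KASSERTs alongside each
 * merge verify that the value really fits in its bit-field before it
 * is shifted in.
 */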
   7681 
   7682 /*
   7683  * wm_nq_start:		[ifnet interface function]
   7684  *
   7685  *	Start packet transmission on the interface for NEWQUEUE devices
   7686  */
   7687 static void
   7688 wm_nq_start(struct ifnet *ifp)
   7689 {
   7690 	struct wm_softc *sc = ifp->if_softc;
   7691 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7692 
   7693 #ifdef WM_MPSAFE
   7694 	KASSERT(if_is_mpsafe(ifp));
   7695 #endif
   7696 	/*
   7697 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7698 	 */
   7699 
   7700 	mutex_enter(txq->txq_lock);
   7701 	if (!txq->txq_stopping)
   7702 		wm_nq_start_locked(ifp);
   7703 	mutex_exit(txq->txq_lock);
   7704 }
   7705 
   7706 static void
   7707 wm_nq_start_locked(struct ifnet *ifp)
   7708 {
   7709 	struct wm_softc *sc = ifp->if_softc;
   7710 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7711 
   7712 	wm_nq_send_common_locked(ifp, txq, false);
   7713 }
   7714 
   7715 static int
   7716 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7717 {
   7718 	int qid;
   7719 	struct wm_softc *sc = ifp->if_softc;
   7720 	struct wm_txqueue *txq;
   7721 
   7722 	qid = wm_select_txqueue(ifp, m);
   7723 	txq = &sc->sc_queue[qid].wmq_txq;
   7724 
   7725 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7726 		m_freem(m);
   7727 		WM_Q_EVCNT_INCR(txq, txdrop);
   7728 		return ENOBUFS;
   7729 	}
   7730 
   7731 	/*
   7732 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7733 	 */
   7734 	ifp->if_obytes += m->m_pkthdr.len;
   7735 	if (m->m_flags & M_MCAST)
   7736 		ifp->if_omcasts++;
   7737 
   7738 	/*
    7739 	 * There are two situations in which this mutex_tryenter() can fail
    7740 	 * at run time:
    7741 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7742 	 *     (2) contention with the deferred if_start softint
    7743 	 *         (wm_handle_queue())
    7744 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7745 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7746 	 * In case (2), the last packet enqueued to txq->txq_interq is
    7747 	 * likewise dequeued there, so it does not get stuck either.
   7748 	 */
   7749 	if (mutex_tryenter(txq->txq_lock)) {
   7750 		if (!txq->txq_stopping)
   7751 			wm_nq_transmit_locked(ifp, txq);
   7752 		mutex_exit(txq->txq_lock);
   7753 	}
   7754 
   7755 	return 0;
   7756 }
   7757 
   7758 static void
   7759 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7760 {
   7761 
   7762 	wm_nq_send_common_locked(ifp, txq, true);
   7763 }
   7764 
   7765 static void
   7766 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7767     bool is_transmit)
   7768 {
   7769 	struct wm_softc *sc = ifp->if_softc;
   7770 	struct mbuf *m0;
   7771 	struct wm_txsoft *txs;
   7772 	bus_dmamap_t dmamap;
   7773 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7774 	bool do_csum, sent;
   7775 
   7776 	KASSERT(mutex_owned(txq->txq_lock));
   7777 
   7778 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7779 		return;
   7780 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7781 		return;
   7782 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7783 		return;
   7784 
   7785 	sent = false;
   7786 
   7787 	/*
   7788 	 * Loop through the send queue, setting up transmit descriptors
   7789 	 * until we drain the queue, or use up all available transmit
   7790 	 * descriptors.
   7791 	 */
   7792 	for (;;) {
   7793 		m0 = NULL;
   7794 
   7795 		/* Get a work queue entry. */
   7796 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7797 			wm_txeof(txq, UINT_MAX);
   7798 			if (txq->txq_sfree == 0) {
   7799 				DPRINTF(WM_DEBUG_TX,
   7800 				    ("%s: TX: no free job descriptors\n",
   7801 					device_xname(sc->sc_dev)));
   7802 				WM_Q_EVCNT_INCR(txq, txsstall);
   7803 				break;
   7804 			}
   7805 		}
   7806 
   7807 		/* Grab a packet off the queue. */
   7808 		if (is_transmit)
   7809 			m0 = pcq_get(txq->txq_interq);
   7810 		else
   7811 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7812 		if (m0 == NULL)
   7813 			break;
   7814 
   7815 		DPRINTF(WM_DEBUG_TX,
   7816 		    ("%s: TX: have packet to transmit: %p\n",
   7817 		    device_xname(sc->sc_dev), m0));
   7818 
   7819 		txs = &txq->txq_soft[txq->txq_snext];
   7820 		dmamap = txs->txs_dmamap;
   7821 
   7822 		/*
   7823 		 * Load the DMA map.  If this fails, the packet either
   7824 		 * didn't fit in the allotted number of segments, or we
   7825 		 * were short on resources.  For the too-many-segments
   7826 		 * case, we simply report an error and drop the packet,
   7827 		 * since we can't sanely copy a jumbo packet to a single
   7828 		 * buffer.
   7829 		 */
   7830 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7831 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7832 		if (error) {
   7833 			if (error == EFBIG) {
   7834 				WM_Q_EVCNT_INCR(txq, txdrop);
   7835 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7836 				    "DMA segments, dropping...\n",
   7837 				    device_xname(sc->sc_dev));
   7838 				wm_dump_mbuf_chain(sc, m0);
   7839 				m_freem(m0);
   7840 				continue;
   7841 			}
   7842 			/* Short on resources, just stop for now. */
   7843 			DPRINTF(WM_DEBUG_TX,
   7844 			    ("%s: TX: dmamap load failed: %d\n",
   7845 			    device_xname(sc->sc_dev), error));
   7846 			break;
   7847 		}
   7848 
   7849 		segs_needed = dmamap->dm_nsegs;
   7850 
   7851 		/*
   7852 		 * Ensure we have enough descriptors free to describe
   7853 		 * the packet.  Note, we always reserve one descriptor
   7854 		 * at the end of the ring due to the semantics of the
   7855 		 * TDT register, plus one more in the event we need
   7856 		 * to load offload context.
   7857 		 */
   7858 		if (segs_needed > txq->txq_free - 2) {
   7859 			/*
   7860 			 * Not enough free descriptors to transmit this
   7861 			 * packet.  We haven't committed anything yet,
   7862 			 * so just unload the DMA map, put the packet
    7863 			 * back on the queue, and punt.  Notify the upper
   7864 			 * layer that there are no more slots left.
   7865 			 */
   7866 			DPRINTF(WM_DEBUG_TX,
   7867 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7868 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7869 			    segs_needed, txq->txq_free - 1));
   7870 			if (!is_transmit)
   7871 				ifp->if_flags |= IFF_OACTIVE;
   7872 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7873 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7874 			WM_Q_EVCNT_INCR(txq, txdstall);
   7875 			break;
   7876 		}
   7877 
   7878 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7879 
   7880 		DPRINTF(WM_DEBUG_TX,
   7881 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7882 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7883 
   7884 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7885 
   7886 		/*
   7887 		 * Store a pointer to the packet so that we can free it
   7888 		 * later.
   7889 		 *
    7890 		 * Initially, we consider the number of descriptors the
    7891 		 * packet uses to be the number of DMA segments.  This may be
   7892 		 * incremented by 1 if we do checksum offload (a descriptor
   7893 		 * is used to set the checksum context).
   7894 		 */
   7895 		txs->txs_mbuf = m0;
   7896 		txs->txs_firstdesc = txq->txq_next;
   7897 		txs->txs_ndesc = segs_needed;
   7898 
   7899 		/* Set up offload parameters for this packet. */
   7900 		uint32_t cmdlen, fields, dcmdlen;
   7901 		if (m0->m_pkthdr.csum_flags &
   7902 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7903 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7904 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7905 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7906 			    &do_csum) != 0) {
   7907 				/* Error message already displayed. */
   7908 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7909 				continue;
   7910 			}
   7911 		} else {
   7912 			do_csum = false;
   7913 			cmdlen = 0;
   7914 			fields = 0;
   7915 		}
   7916 
   7917 		/* Sync the DMA map. */
   7918 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7919 		    BUS_DMASYNC_PREWRITE);
   7920 
   7921 		/* Initialize the first transmit descriptor. */
   7922 		nexttx = txq->txq_next;
   7923 		if (!do_csum) {
   7924 			/* setup a legacy descriptor */
   7925 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7926 			    dmamap->dm_segs[0].ds_addr);
   7927 			txq->txq_descs[nexttx].wtx_cmdlen =
   7928 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7929 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7930 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7931 			if (vlan_has_tag(m0)) {
   7932 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7933 				    htole32(WTX_CMD_VLE);
   7934 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7935 				    htole16(vlan_get_tag(m0));
   7936 			} else {
    7937 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7938 			}
   7939 			dcmdlen = 0;
   7940 		} else {
   7941 			/* setup an advanced data descriptor */
   7942 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7943 			    htole64(dmamap->dm_segs[0].ds_addr);
   7944 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7945 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7946 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7947 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7948 			    htole32(fields);
   7949 			DPRINTF(WM_DEBUG_TX,
   7950 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7951 			    device_xname(sc->sc_dev), nexttx,
   7952 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7953 			DPRINTF(WM_DEBUG_TX,
   7954 			    ("\t 0x%08x%08x\n", fields,
   7955 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7956 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7957 		}
   7958 
   7959 		lasttx = nexttx;
   7960 		nexttx = WM_NEXTTX(txq, nexttx);
   7961 		/*
    7962 		 * Fill in the remaining descriptors.  The legacy and
    7963 		 * advanced formats are identical from here on.
   7964 		 */
   7965 		for (seg = 1; seg < dmamap->dm_nsegs;
   7966 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7967 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7968 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7969 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7970 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7971 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7972 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7973 			lasttx = nexttx;
   7974 
   7975 			DPRINTF(WM_DEBUG_TX,
   7976 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7977 			     "len %#04zx\n",
   7978 			    device_xname(sc->sc_dev), nexttx,
   7979 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7980 			    dmamap->dm_segs[seg].ds_len));
   7981 		}
   7982 
   7983 		KASSERT(lasttx != -1);
   7984 
   7985 		/*
   7986 		 * Set up the command byte on the last descriptor of
   7987 		 * the packet.  If we're in the interrupt delay window,
   7988 		 * delay the interrupt.
   7989 		 */
   7990 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7991 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7992 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7993 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7994 
   7995 		txs->txs_lastdesc = lasttx;
   7996 
   7997 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7998 		    device_xname(sc->sc_dev),
   7999 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8000 
   8001 		/* Sync the descriptors we're using. */
   8002 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8003 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8004 
   8005 		/* Give the packet to the chip. */
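         		/*
         		 * A note on the TDT semantics: advancing the tail
         		 * register hands every descriptor up to (but not
         		 * including) nexttx over to the hardware, which is why
         		 * one slot is always kept in reserve above.
         		 */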
   8006 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8007 		sent = true;
   8008 
   8009 		DPRINTF(WM_DEBUG_TX,
   8010 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8011 
   8012 		DPRINTF(WM_DEBUG_TX,
   8013 		    ("%s: TX: finished transmitting packet, job %d\n",
   8014 		    device_xname(sc->sc_dev), txq->txq_snext));
   8015 
   8016 		/* Advance the tx pointer. */
   8017 		txq->txq_free -= txs->txs_ndesc;
   8018 		txq->txq_next = nexttx;
   8019 
   8020 		txq->txq_sfree--;
   8021 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8022 
   8023 		/* Pass the packet to any BPF listeners. */
   8024 		bpf_mtap(ifp, m0);
   8025 	}
   8026 
   8027 	if (m0 != NULL) {
   8028 		if (!is_transmit)
   8029 			ifp->if_flags |= IFF_OACTIVE;
   8030 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8031 		WM_Q_EVCNT_INCR(txq, txdrop);
   8032 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8033 			__func__));
   8034 		m_freem(m0);
   8035 	}
   8036 
   8037 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8038 		/* No more slots; notify upper layer. */
   8039 		if (!is_transmit)
   8040 			ifp->if_flags |= IFF_OACTIVE;
   8041 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8042 	}
   8043 
   8044 	if (sent) {
   8045 		/* Set a watchdog timer in case the chip flakes out. */
   8046 		txq->txq_lastsent = time_uptime;
   8047 		txq->txq_sending = true;
   8048 	}
   8049 }
   8050 
   8051 static void
   8052 wm_deferred_start_locked(struct wm_txqueue *txq)
   8053 {
   8054 	struct wm_softc *sc = txq->txq_sc;
   8055 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8056 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8057 	int qid = wmq->wmq_id;
   8058 
   8059 	KASSERT(mutex_owned(txq->txq_lock));
   8060 
    8061 	if (txq->txq_stopping)
    8062 		return;
   8065 
   8066 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8067 		/* XXX needed for ALTQ or single-CPU systems */
   8068 		if (qid == 0)
   8069 			wm_nq_start_locked(ifp);
   8070 		wm_nq_transmit_locked(ifp, txq);
   8071 	} else {
    8072 		/* XXX needed for ALTQ or single-CPU systems */
   8073 		if (qid == 0)
   8074 			wm_start_locked(ifp);
   8075 		wm_transmit_locked(ifp, txq);
   8076 	}
   8077 }
   8078 
   8079 /* Interrupt */
   8080 
   8081 /*
   8082  * wm_txeof:
   8083  *
   8084  *	Helper; handle transmit interrupts.
   8085  */
   8086 static bool
   8087 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8088 {
   8089 	struct wm_softc *sc = txq->txq_sc;
   8090 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8091 	struct wm_txsoft *txs;
   8092 	int count = 0;
   8093 	int i;
   8094 	uint8_t status;
   8095 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8096 	bool more = false;
   8097 
   8098 	KASSERT(mutex_owned(txq->txq_lock));
   8099 
   8100 	if (txq->txq_stopping)
   8101 		return false;
   8102 
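         	/*
         	 * Assume we can accept packets again: the no-space flags are
         	 * cleared here and are set again by the send path if the ring
         	 * fills up once more.
         	 */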
   8103 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8104 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8105 	if (wmq->wmq_id == 0)
   8106 		ifp->if_flags &= ~IFF_OACTIVE;
   8107 
   8108 	/*
   8109 	 * Go through the Tx list and free mbufs for those
   8110 	 * frames which have been transmitted.
   8111 	 */
   8112 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8113 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8114 		if (limit-- == 0) {
   8115 			more = true;
   8116 			DPRINTF(WM_DEBUG_TX,
   8117 			    ("%s: TX: loop limited, job %d is not processed\n",
   8118 				device_xname(sc->sc_dev), i));
   8119 			break;
   8120 		}
   8121 
   8122 		txs = &txq->txq_soft[i];
   8123 
   8124 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8125 			device_xname(sc->sc_dev), i));
   8126 
   8127 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8128 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8129 
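         		/*
         		 * The chip sets the DD (descriptor done) bit once it has
         		 * finished with the last descriptor of a job; if it is
         		 * still clear, the job is still in flight, so re-arm the
         		 * read sync and stop scanning.
         		 */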
   8130 		status =
   8131 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8132 		if ((status & WTX_ST_DD) == 0) {
   8133 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8134 			    BUS_DMASYNC_PREREAD);
   8135 			break;
   8136 		}
   8137 
   8138 		count++;
   8139 		DPRINTF(WM_DEBUG_TX,
   8140 		    ("%s: TX: job %d done: descs %d..%d\n",
   8141 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8142 		    txs->txs_lastdesc));
   8143 
   8144 		/*
   8145 		 * XXX We should probably be using the statistics
   8146 		 * XXX registers, but I don't know if they exist
   8147 		 * XXX on chips before the i82544.
   8148 		 */
   8149 
   8150 #ifdef WM_EVENT_COUNTERS
   8151 		if (status & WTX_ST_TU)
   8152 			WM_Q_EVCNT_INCR(txq, tu);
   8153 #endif /* WM_EVENT_COUNTERS */
   8154 
   8155 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8156 			ifp->if_oerrors++;
   8157 			if (status & WTX_ST_LC)
   8158 				log(LOG_WARNING, "%s: late collision\n",
   8159 				    device_xname(sc->sc_dev));
   8160 			else if (status & WTX_ST_EC) {
   8161 				ifp->if_collisions += 16;
   8162 				log(LOG_WARNING, "%s: excessive collisions\n",
   8163 				    device_xname(sc->sc_dev));
   8164 			}
   8165 		} else
   8166 			ifp->if_opackets++;
   8167 
   8168 		txq->txq_packets++;
   8169 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8170 
   8171 		txq->txq_free += txs->txs_ndesc;
   8172 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8173 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8174 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8175 		m_freem(txs->txs_mbuf);
   8176 		txs->txs_mbuf = NULL;
   8177 	}
   8178 
   8179 	/* Update the dirty transmit buffer pointer. */
   8180 	txq->txq_sdirty = i;
   8181 	DPRINTF(WM_DEBUG_TX,
   8182 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8183 
   8184 	if (count != 0)
   8185 		rnd_add_uint32(&sc->rnd_source, count);
   8186 
   8187 	/*
   8188 	 * If there are no more pending transmissions, cancel the watchdog
   8189 	 * timer.
   8190 	 */
   8191 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8192 		txq->txq_sending = false;
   8193 
   8194 	return more;
   8195 }
   8196 
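         /*
          * The accessors below hide the three receive descriptor layouts this
          * driver handles: the legacy format, the 82574 extended format, and
          * the "newqueue" (WM_F_NEWQUEUE, 82575 and later) advanced format.
          * Each accessor dispatches on sc_type/sc_flags so the rest of the
          * receive path can stay format-agnostic.
          */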
   8197 static inline uint32_t
   8198 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8199 {
   8200 	struct wm_softc *sc = rxq->rxq_sc;
   8201 
   8202 	if (sc->sc_type == WM_T_82574)
   8203 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8204 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8205 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8206 	else
   8207 		return rxq->rxq_descs[idx].wrx_status;
   8208 }
   8209 
   8210 static inline uint32_t
   8211 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8212 {
   8213 	struct wm_softc *sc = rxq->rxq_sc;
   8214 
   8215 	if (sc->sc_type == WM_T_82574)
   8216 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8217 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8218 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8219 	else
   8220 		return rxq->rxq_descs[idx].wrx_errors;
   8221 }
   8222 
   8223 static inline uint16_t
   8224 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8225 {
   8226 	struct wm_softc *sc = rxq->rxq_sc;
   8227 
   8228 	if (sc->sc_type == WM_T_82574)
   8229 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8230 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8231 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8232 	else
   8233 		return rxq->rxq_descs[idx].wrx_special;
   8234 }
   8235 
   8236 static inline int
   8237 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8238 {
   8239 	struct wm_softc *sc = rxq->rxq_sc;
   8240 
   8241 	if (sc->sc_type == WM_T_82574)
   8242 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8243 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8244 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8245 	else
   8246 		return rxq->rxq_descs[idx].wrx_len;
   8247 }
   8248 
   8249 #ifdef WM_DEBUG
   8250 static inline uint32_t
   8251 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8252 {
   8253 	struct wm_softc *sc = rxq->rxq_sc;
   8254 
   8255 	if (sc->sc_type == WM_T_82574)
   8256 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8257 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8258 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8259 	else
   8260 		return 0;
   8261 }
   8262 
   8263 static inline uint8_t
   8264 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8265 {
   8266 	struct wm_softc *sc = rxq->rxq_sc;
   8267 
   8268 	if (sc->sc_type == WM_T_82574)
   8269 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8270 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8271 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8272 	else
   8273 		return 0;
   8274 }
   8275 #endif /* WM_DEBUG */
   8276 
   8277 static inline bool
   8278 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8279     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8280 {
   8281 
   8282 	if (sc->sc_type == WM_T_82574)
   8283 		return (status & ext_bit) != 0;
   8284 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8285 		return (status & nq_bit) != 0;
   8286 	else
   8287 		return (status & legacy_bit) != 0;
   8288 }
   8289 
   8290 static inline bool
   8291 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8292     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8293 {
   8294 
   8295 	if (sc->sc_type == WM_T_82574)
   8296 		return (error & ext_bit) != 0;
   8297 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8298 		return (error & nq_bit) != 0;
   8299 	else
   8300 		return (error & legacy_bit) != 0;
   8301 }
   8302 
   8303 static inline bool
   8304 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8305 {
   8306 
   8307 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8308 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8309 		return true;
   8310 	else
   8311 		return false;
   8312 }
   8313 
   8314 static inline bool
   8315 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8316 {
   8317 	struct wm_softc *sc = rxq->rxq_sc;
   8318 
   8319 	/* XXXX missing error bit for newqueue? */
   8320 	if (wm_rxdesc_is_set_error(sc, errors,
   8321 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8322 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8323 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8324 		NQRXC_ERROR_RXE)) {
   8325 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8326 		    EXTRXC_ERROR_SE, 0))
   8327 			log(LOG_WARNING, "%s: symbol error\n",
   8328 			    device_xname(sc->sc_dev));
   8329 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8330 		    EXTRXC_ERROR_SEQ, 0))
   8331 			log(LOG_WARNING, "%s: receive sequence error\n",
   8332 			    device_xname(sc->sc_dev));
   8333 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8334 		    EXTRXC_ERROR_CE, 0))
   8335 			log(LOG_WARNING, "%s: CRC error\n",
   8336 			    device_xname(sc->sc_dev));
   8337 		return true;
   8338 	}
   8339 
   8340 	return false;
   8341 }
   8342 
   8343 static inline bool
   8344 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8345 {
   8346 	struct wm_softc *sc = rxq->rxq_sc;
   8347 
   8348 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8349 		NQRXC_STATUS_DD)) {
   8350 		/* We have processed all of the receive descriptors. */
   8351 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8352 		return false;
   8353 	}
   8354 
   8355 	return true;
   8356 }
   8357 
   8358 static inline bool
   8359 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8360     uint16_t vlantag, struct mbuf *m)
   8361 {
   8362 
   8363 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8364 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8365 		vlan_set_tag(m, le16toh(vlantag));
   8366 	}
   8367 
   8368 	return true;
   8369 }
   8370 
   8371 static inline void
   8372 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8373     uint32_t errors, struct mbuf *m)
   8374 {
   8375 	struct wm_softc *sc = rxq->rxq_sc;
   8376 
   8377 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8378 		if (wm_rxdesc_is_set_status(sc, status,
   8379 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8380 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8381 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8382 			if (wm_rxdesc_is_set_error(sc, errors,
   8383 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8384 				m->m_pkthdr.csum_flags |=
   8385 					M_CSUM_IPv4_BAD;
   8386 		}
   8387 		if (wm_rxdesc_is_set_status(sc, status,
   8388 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8389 			/*
   8390 			 * Note: we don't know if this was TCP or UDP,
   8391 			 * so we just set both bits, and expect the
   8392 			 * upper layers to deal.
   8393 			 */
   8394 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8395 			m->m_pkthdr.csum_flags |=
   8396 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8397 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8398 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8399 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8400 				m->m_pkthdr.csum_flags |=
   8401 					M_CSUM_TCP_UDP_BAD;
   8402 		}
   8403 	}
   8404 }
   8405 
   8406 /*
   8407  * wm_rxeof:
   8408  *
   8409  *	Helper; handle receive interrupts.
   8410  */
   8411 static bool
   8412 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8413 {
   8414 	struct wm_softc *sc = rxq->rxq_sc;
   8415 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8416 	struct wm_rxsoft *rxs;
   8417 	struct mbuf *m;
   8418 	int i, len;
   8419 	int count = 0;
   8420 	uint32_t status, errors;
   8421 	uint16_t vlantag;
   8422 	bool more = false;
   8423 
   8424 	KASSERT(mutex_owned(rxq->rxq_lock));
   8425 
   8426 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8427 		if (limit-- == 0) {
   8428 			rxq->rxq_ptr = i;
   8429 			more = true;
   8430 			DPRINTF(WM_DEBUG_RX,
   8431 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8432 				device_xname(sc->sc_dev), i));
   8433 			break;
   8434 		}
   8435 
   8436 		rxs = &rxq->rxq_soft[i];
   8437 
   8438 		DPRINTF(WM_DEBUG_RX,
   8439 		    ("%s: RX: checking descriptor %d\n",
   8440 		    device_xname(sc->sc_dev), i));
   8441 		wm_cdrxsync(rxq, i,
   8442 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8443 
   8444 		status = wm_rxdesc_get_status(rxq, i);
   8445 		errors = wm_rxdesc_get_errors(rxq, i);
   8446 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8447 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8448 #ifdef WM_DEBUG
   8449 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8450 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8451 #endif
   8452 
   8453 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8454 			/*
    8455 			 * Update the receive pointer while still holding
    8456 			 * rxq_lock, consistent with the processed count.
   8457 			 */
   8458 			rxq->rxq_ptr = i;
   8459 			break;
   8460 		}
   8461 
   8462 		count++;
   8463 		if (__predict_false(rxq->rxq_discard)) {
   8464 			DPRINTF(WM_DEBUG_RX,
   8465 			    ("%s: RX: discarding contents of descriptor %d\n",
   8466 			    device_xname(sc->sc_dev), i));
   8467 			wm_init_rxdesc(rxq, i);
   8468 			if (wm_rxdesc_is_eop(rxq, status)) {
   8469 				/* Reset our state. */
   8470 				DPRINTF(WM_DEBUG_RX,
   8471 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8472 				    device_xname(sc->sc_dev)));
   8473 				rxq->rxq_discard = 0;
   8474 			}
   8475 			continue;
   8476 		}
   8477 
   8478 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8479 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8480 
   8481 		m = rxs->rxs_mbuf;
   8482 
   8483 		/*
   8484 		 * Add a new receive buffer to the ring, unless of
   8485 		 * course the length is zero. Treat the latter as a
   8486 		 * failed mapping.
   8487 		 */
   8488 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8489 			/*
   8490 			 * Failed, throw away what we've done so
   8491 			 * far, and discard the rest of the packet.
   8492 			 */
   8493 			ifp->if_ierrors++;
   8494 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8495 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8496 			wm_init_rxdesc(rxq, i);
   8497 			if (!wm_rxdesc_is_eop(rxq, status))
   8498 				rxq->rxq_discard = 1;
   8499 			if (rxq->rxq_head != NULL)
   8500 				m_freem(rxq->rxq_head);
   8501 			WM_RXCHAIN_RESET(rxq);
   8502 			DPRINTF(WM_DEBUG_RX,
   8503 			    ("%s: RX: Rx buffer allocation failed, "
   8504 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8505 			    rxq->rxq_discard ? " (discard)" : ""));
   8506 			continue;
   8507 		}
   8508 
   8509 		m->m_len = len;
   8510 		rxq->rxq_len += len;
   8511 		DPRINTF(WM_DEBUG_RX,
   8512 		    ("%s: RX: buffer at %p len %d\n",
   8513 		    device_xname(sc->sc_dev), m->m_data, len));
   8514 
   8515 		/* If this is not the end of the packet, keep looking. */
   8516 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8517 			WM_RXCHAIN_LINK(rxq, m);
   8518 			DPRINTF(WM_DEBUG_RX,
   8519 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8520 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8521 			continue;
   8522 		}
   8523 
   8524 		/*
    8525 		 * Okay, we have the entire packet now.  The chip is
    8526 		 * configured to include the FCS except on the I35[04] and
    8527 		 * I21[01] (not all chips can be configured to strip it),
    8528 		 * so we normally need to trim it.  We may also need to
    8529 		 * adjust the length of the previous mbuf in the chain if
    8530 		 * the current mbuf is too short.
    8531 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8532 		 * register is always set on the I350, so we don't trim
    8533 		 * the FCS there.
   8533 		 */
   8534 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8535 		    && (sc->sc_type != WM_T_I210)
   8536 		    && (sc->sc_type != WM_T_I211)) {
   8537 			if (m->m_len < ETHER_CRC_LEN) {
   8538 				rxq->rxq_tail->m_len
   8539 				    -= (ETHER_CRC_LEN - m->m_len);
   8540 				m->m_len = 0;
   8541 			} else
   8542 				m->m_len -= ETHER_CRC_LEN;
   8543 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8544 		} else
   8545 			len = rxq->rxq_len;
   8546 
   8547 		WM_RXCHAIN_LINK(rxq, m);
   8548 
   8549 		*rxq->rxq_tailp = NULL;
   8550 		m = rxq->rxq_head;
   8551 
   8552 		WM_RXCHAIN_RESET(rxq);
   8553 
   8554 		DPRINTF(WM_DEBUG_RX,
   8555 		    ("%s: RX: have entire packet, len -> %d\n",
   8556 		    device_xname(sc->sc_dev), len));
   8557 
   8558 		/* If an error occurred, update stats and drop the packet. */
   8559 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8560 			m_freem(m);
   8561 			continue;
   8562 		}
   8563 
   8564 		/* No errors.  Receive the packet. */
   8565 		m_set_rcvif(m, ifp);
   8566 		m->m_pkthdr.len = len;
   8567 		/*
    8568 		 * TODO: we should save the rsshash and rsstype in this
    8569 		 * mbuf.
   8570 		 */
   8571 		DPRINTF(WM_DEBUG_RX,
   8572 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8573 			device_xname(sc->sc_dev), rsstype, rsshash));
   8574 
   8575 		/*
   8576 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8577 		 * for us.  Associate the tag with the packet.
   8578 		 */
   8579 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8580 			continue;
   8581 
   8582 		/* Set up checksum info for this packet. */
   8583 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8584 		/*
    8585 		 * Update the receive pointer while still holding rxq_lock,
    8586 		 * consistent with the processed count.
   8587 		 */
   8588 		rxq->rxq_ptr = i;
   8589 		rxq->rxq_packets++;
   8590 		rxq->rxq_bytes += len;
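         		/*
         		 * Drop rxq_lock across the hand-off to the network stack
         		 * so it is not held while the packet is being enqueued.
         		 */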
   8591 		mutex_exit(rxq->rxq_lock);
   8592 
   8593 		/* Pass it on. */
   8594 		if_percpuq_enqueue(sc->sc_ipq, m);
   8595 
   8596 		mutex_enter(rxq->rxq_lock);
   8597 
   8598 		if (rxq->rxq_stopping)
   8599 			break;
   8600 	}
   8601 
   8602 	if (count != 0)
   8603 		rnd_add_uint32(&sc->rnd_source, count);
   8604 
   8605 	DPRINTF(WM_DEBUG_RX,
   8606 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8607 
   8608 	return more;
   8609 }
   8610 
   8611 /*
   8612  * wm_linkintr_gmii:
   8613  *
   8614  *	Helper; handle link interrupts for GMII.
   8615  */
   8616 static void
   8617 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8618 {
   8619 
   8620 	KASSERT(WM_CORE_LOCKED(sc));
   8621 
   8622 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8623 		__func__));
   8624 
   8625 	if (icr & ICR_LSC) {
   8626 		uint32_t reg;
   8627 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8628 
   8629 		if ((status & STATUS_LU) != 0) {
   8630 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8631 				device_xname(sc->sc_dev),
   8632 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8633 		} else {
   8634 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8635 				device_xname(sc->sc_dev)));
   8636 		}
   8637 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8638 			wm_gig_downshift_workaround_ich8lan(sc);
   8639 
   8640 		if ((sc->sc_type == WM_T_ICH8)
   8641 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8642 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8643 		}
   8644 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8645 			device_xname(sc->sc_dev)));
   8646 		mii_pollstat(&sc->sc_mii);
   8647 		if (sc->sc_type == WM_T_82543) {
   8648 			int miistatus, active;
   8649 
   8650 			/*
   8651 			 * With 82543, we need to force speed and
   8652 			 * duplex on the MAC equal to what the PHY
   8653 			 * speed and duplex configuration is.
   8654 			 */
   8655 			miistatus = sc->sc_mii.mii_media_status;
   8656 
   8657 			if (miistatus & IFM_ACTIVE) {
   8658 				active = sc->sc_mii.mii_media_active;
   8659 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8660 				switch (IFM_SUBTYPE(active)) {
   8661 				case IFM_10_T:
   8662 					sc->sc_ctrl |= CTRL_SPEED_10;
   8663 					break;
   8664 				case IFM_100_TX:
   8665 					sc->sc_ctrl |= CTRL_SPEED_100;
   8666 					break;
   8667 				case IFM_1000_T:
   8668 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8669 					break;
   8670 				default:
   8671 					/*
    8672 					 * Fiber?
    8673 					 * Should not get here.
   8674 					 */
   8675 					printf("unknown media (%x)\n", active);
   8676 					break;
   8677 				}
   8678 				if (active & IFM_FDX)
   8679 					sc->sc_ctrl |= CTRL_FD;
   8680 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8681 			}
   8682 		} else if (sc->sc_type == WM_T_PCH) {
   8683 			wm_k1_gig_workaround_hv(sc,
   8684 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8685 		}
   8686 
   8687 		if ((sc->sc_phytype == WMPHY_82578)
   8688 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8689 			== IFM_1000_T)) {
   8690 
   8691 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8692 				delay(200*1000); /* XXX too big */
   8693 
   8694 				/* Link stall fix for link up */
   8695 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8696 				    HV_MUX_DATA_CTRL,
   8697 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8698 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8699 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8700 				    HV_MUX_DATA_CTRL,
   8701 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8702 			}
   8703 		}
   8704 		/*
   8705 		 * I217 Packet Loss issue:
   8706 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8707 		 * on power up.
   8708 		 * Set the Beacon Duration for I217 to 8 usec
   8709 		 */
   8710 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8711 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8712 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8713 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8714 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8715 		}
   8716 
   8717 		/* XXX Work-around I218 hang issue */
   8718 		/* e1000_k1_workaround_lpt_lp() */
   8719 
   8720 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8721 			/*
   8722 			 * Set platform power management values for Latency
   8723 			 * Tolerance Reporting (LTR)
   8724 			 */
   8725 			wm_platform_pm_pch_lpt(sc,
   8726 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8727 				    != 0));
   8728 		}
   8729 
   8730 		/* FEXTNVM6 K1-off workaround */
   8731 		if (sc->sc_type == WM_T_PCH_SPT) {
   8732 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8733 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8734 			    & FEXTNVM6_K1_OFF_ENABLE)
   8735 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8736 			else
   8737 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8738 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8739 		}
   8740 	} else if (icr & ICR_RXSEQ) {
   8741 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8742 			device_xname(sc->sc_dev)));
   8743 	}
   8744 }
   8745 
   8746 /*
   8747  * wm_linkintr_tbi:
   8748  *
   8749  *	Helper; handle link interrupts for TBI mode.
   8750  */
   8751 static void
   8752 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8753 {
   8754 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8755 	uint32_t status;
   8756 
   8757 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8758 		__func__));
   8759 
   8760 	status = CSR_READ(sc, WMREG_STATUS);
   8761 	if (icr & ICR_LSC) {
   8762 		if (status & STATUS_LU) {
   8763 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8764 			    device_xname(sc->sc_dev),
   8765 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8766 			/*
   8767 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8768 			 * so we should update sc->sc_ctrl
   8769 			 */
   8770 
   8771 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8772 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8773 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8774 			if (status & STATUS_FD)
   8775 				sc->sc_tctl |=
   8776 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8777 			else
   8778 				sc->sc_tctl |=
   8779 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8780 			if (sc->sc_ctrl & CTRL_TFCE)
   8781 				sc->sc_fcrtl |= FCRTL_XONE;
   8782 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8783 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8784 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8785 				      sc->sc_fcrtl);
   8786 			sc->sc_tbi_linkup = 1;
   8787 			if_link_state_change(ifp, LINK_STATE_UP);
   8788 		} else {
   8789 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8790 			    device_xname(sc->sc_dev)));
   8791 			sc->sc_tbi_linkup = 0;
   8792 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8793 		}
   8794 		/* Update LED */
   8795 		wm_tbi_serdes_set_linkled(sc);
   8796 	} else if (icr & ICR_RXSEQ) {
   8797 		DPRINTF(WM_DEBUG_LINK,
   8798 		    ("%s: LINK: Receive sequence error\n",
   8799 		    device_xname(sc->sc_dev)));
   8800 	}
   8801 }
   8802 
   8803 /*
   8804  * wm_linkintr_serdes:
   8805  *
    8806  *	Helper; handle link interrupts for SERDES mode.
   8807  */
   8808 static void
   8809 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8810 {
   8811 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8812 	struct mii_data *mii = &sc->sc_mii;
   8813 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8814 	uint32_t pcs_adv, pcs_lpab, reg;
   8815 
   8816 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8817 		__func__));
   8818 
   8819 	if (icr & ICR_LSC) {
   8820 		/* Check PCS */
   8821 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8822 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8823 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8824 				device_xname(sc->sc_dev)));
   8825 			mii->mii_media_status |= IFM_ACTIVE;
   8826 			sc->sc_tbi_linkup = 1;
   8827 			if_link_state_change(ifp, LINK_STATE_UP);
   8828 		} else {
   8829 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8830 				device_xname(sc->sc_dev)));
   8831 			mii->mii_media_status |= IFM_NONE;
   8832 			sc->sc_tbi_linkup = 0;
   8833 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8834 			wm_tbi_serdes_set_linkled(sc);
   8835 			return;
   8836 		}
   8837 		mii->mii_media_active |= IFM_1000_SX;
   8838 		if ((reg & PCS_LSTS_FDX) != 0)
   8839 			mii->mii_media_active |= IFM_FDX;
   8840 		else
   8841 			mii->mii_media_active |= IFM_HDX;
   8842 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8843 			/* Check flow */
   8844 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8845 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8846 				DPRINTF(WM_DEBUG_LINK,
   8847 				    ("XXX LINKOK but not ACOMP\n"));
   8848 				return;
   8849 			}
   8850 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8851 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8852 			DPRINTF(WM_DEBUG_LINK,
   8853 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
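         			/*
         			 * Resolve flow control from the symmetric and
         			 * asymmetric PAUSE bits, in the usual IEEE
         			 * 802.3 manner: symmetric pause on both sides
         			 * enables Tx and Rx pause, while the asymmetric
         			 * combinations below select Tx-only or Rx-only
         			 * pause.
         			 */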
   8854 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8855 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8856 				mii->mii_media_active |= IFM_FLOW
   8857 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8858 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8859 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8860 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8861 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8862 				mii->mii_media_active |= IFM_FLOW
   8863 				    | IFM_ETH_TXPAUSE;
   8864 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8865 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8866 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8867 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8868 				mii->mii_media_active |= IFM_FLOW
   8869 				    | IFM_ETH_RXPAUSE;
   8870 		}
   8871 		/* Update LED */
   8872 		wm_tbi_serdes_set_linkled(sc);
   8873 	} else {
   8874 		DPRINTF(WM_DEBUG_LINK,
   8875 		    ("%s: LINK: Receive sequence error\n",
   8876 		    device_xname(sc->sc_dev)));
   8877 	}
   8878 }
   8879 
   8880 /*
   8881  * wm_linkintr:
   8882  *
   8883  *	Helper; handle link interrupts.
   8884  */
   8885 static void
   8886 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8887 {
   8888 
   8889 	KASSERT(WM_CORE_LOCKED(sc));
   8890 
   8891 	if (sc->sc_flags & WM_F_HAS_MII)
   8892 		wm_linkintr_gmii(sc, icr);
   8893 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8894 	    && (sc->sc_type >= WM_T_82575))
   8895 		wm_linkintr_serdes(sc, icr);
   8896 	else
   8897 		wm_linkintr_tbi(sc, icr);
   8898 }
   8899 
   8900 /*
   8901  * wm_intr_legacy:
   8902  *
   8903  *	Interrupt service routine for INTx and MSI.
   8904  */
   8905 static int
   8906 wm_intr_legacy(void *arg)
   8907 {
   8908 	struct wm_softc *sc = arg;
   8909 	struct wm_queue *wmq = &sc->sc_queue[0];
   8910 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8911 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8912 	uint32_t icr, rndval = 0;
   8913 	int handled = 0;
   8914 
   8915 	while (1 /* CONSTCOND */) {
   8916 		icr = CSR_READ(sc, WMREG_ICR);
   8917 		if ((icr & sc->sc_icr) == 0)
   8918 			break;
   8919 		if (handled == 0) {
   8920 			DPRINTF(WM_DEBUG_TX,
			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8922 		}
   8923 		if (rndval == 0)
   8924 			rndval = icr;
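         		/* The first ICR value read is used as the rnd(4) sample. */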
   8925 
   8926 		mutex_enter(rxq->rxq_lock);
   8927 
   8928 		if (rxq->rxq_stopping) {
   8929 			mutex_exit(rxq->rxq_lock);
   8930 			break;
   8931 		}
   8932 
   8933 		handled = 1;
   8934 
   8935 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8936 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8937 			DPRINTF(WM_DEBUG_RX,
   8938 			    ("%s: RX: got Rx intr 0x%08x\n",
   8939 			    device_xname(sc->sc_dev),
   8940 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8941 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8942 		}
   8943 #endif
   8944 		/*
    8945 		 * wm_rxeof() does *not* call upper layer functions directly;
    8946 		 * if_percpuq_enqueue() just calls softint_schedule(), so we
    8947 		 * can safely call wm_rxeof() in interrupt context.
   8948 		 */
   8949 		wm_rxeof(rxq, UINT_MAX);
   8950 
   8951 		mutex_exit(rxq->rxq_lock);
   8952 		mutex_enter(txq->txq_lock);
   8953 
   8954 		if (txq->txq_stopping) {
   8955 			mutex_exit(txq->txq_lock);
   8956 			break;
   8957 		}
   8958 
   8959 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8960 		if (icr & ICR_TXDW) {
   8961 			DPRINTF(WM_DEBUG_TX,
   8962 			    ("%s: TX: got TXDW interrupt\n",
   8963 			    device_xname(sc->sc_dev)));
   8964 			WM_Q_EVCNT_INCR(txq, txdw);
   8965 		}
   8966 #endif
   8967 		wm_txeof(txq, UINT_MAX);
   8968 
   8969 		mutex_exit(txq->txq_lock);
   8970 		WM_CORE_LOCK(sc);
   8971 
   8972 		if (sc->sc_core_stopping) {
   8973 			WM_CORE_UNLOCK(sc);
   8974 			break;
   8975 		}
   8976 
   8977 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8978 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8979 			wm_linkintr(sc, icr);
   8980 		}
   8981 
   8982 		WM_CORE_UNLOCK(sc);
   8983 
   8984 		if (icr & ICR_RXO) {
   8985 #if defined(WM_DEBUG)
   8986 			log(LOG_WARNING, "%s: Receive overrun\n",
   8987 			    device_xname(sc->sc_dev));
   8988 #endif /* defined(WM_DEBUG) */
   8989 		}
   8990 	}
   8991 
   8992 	rnd_add_uint32(&sc->rnd_source, rndval);
   8993 
   8994 	if (handled) {
   8995 		/* Try to get more packets going. */
   8996 		softint_schedule(wmq->wmq_si);
   8997 	}
   8998 
   8999 	return handled;
   9000 }
   9001 
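         /*
          * The 82574 masks per-queue interrupts via the ICR/IMS/IMC bits, the
          * 82575 via per-queue EITR bits in EIMC/EIMS, and later chips via one
          * EIMC/EIMS bit per MSI-X vector; the helpers below hide those
          * differences.
          */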
   9002 static inline void
   9003 wm_txrxintr_disable(struct wm_queue *wmq)
   9004 {
   9005 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9006 
   9007 	if (sc->sc_type == WM_T_82574)
   9008 		CSR_WRITE(sc, WMREG_IMC,
   9009 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9010 	else if (sc->sc_type == WM_T_82575)
   9011 		CSR_WRITE(sc, WMREG_EIMC,
   9012 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9013 	else
   9014 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9015 }
   9016 
   9017 static inline void
   9018 wm_txrxintr_enable(struct wm_queue *wmq)
   9019 {
   9020 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9021 
   9022 	wm_itrs_calculate(sc, wmq);
   9023 
   9024 	/*
    9025 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9026 	 * here.  It does not matter whether RXQ(0) or RXQ(1) re-enables
    9027 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9028 	 * while its wm_handle_queue(wmq) is running.
   9029 	 */
   9030 	if (sc->sc_type == WM_T_82574)
   9031 		CSR_WRITE(sc, WMREG_IMS,
   9032 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9033 	else if (sc->sc_type == WM_T_82575)
   9034 		CSR_WRITE(sc, WMREG_EIMS,
   9035 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9036 	else
   9037 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9038 }
   9039 
   9040 static int
   9041 wm_txrxintr_msix(void *arg)
   9042 {
   9043 	struct wm_queue *wmq = arg;
   9044 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9045 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9046 	struct wm_softc *sc = txq->txq_sc;
   9047 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9048 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9049 	bool txmore;
   9050 	bool rxmore;
   9051 
   9052 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9053 
   9054 	DPRINTF(WM_DEBUG_TX,
   9055 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9056 
   9057 	wm_txrxintr_disable(wmq);
   9058 
   9059 	mutex_enter(txq->txq_lock);
   9060 
   9061 	if (txq->txq_stopping) {
   9062 		mutex_exit(txq->txq_lock);
   9063 		return 0;
   9064 	}
   9065 
   9066 	WM_Q_EVCNT_INCR(txq, txdw);
   9067 	txmore = wm_txeof(txq, txlimit);
    9068 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   9069 	mutex_exit(txq->txq_lock);
   9070 
   9071 	DPRINTF(WM_DEBUG_RX,
   9072 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9073 	mutex_enter(rxq->rxq_lock);
   9074 
   9075 	if (rxq->rxq_stopping) {
   9076 		mutex_exit(rxq->rxq_lock);
   9077 		return 0;
   9078 	}
   9079 
   9080 	WM_Q_EVCNT_INCR(rxq, rxintr);
   9081 	rxmore = wm_rxeof(rxq, rxlimit);
   9082 	mutex_exit(rxq->rxq_lock);
   9083 
   9084 	wm_itrs_writereg(sc, wmq);
   9085 
   9086 	if (txmore || rxmore)
   9087 		softint_schedule(wmq->wmq_si);
   9088 	else
   9089 		wm_txrxintr_enable(wmq);
   9090 
   9091 	return 1;
   9092 }
   9093 
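         /*
          * wm_handle_queue:
          *
          *	Softint handler for one Tx/Rx queue pair; processes the Tx and
          *	Rx work deferred by the interrupt handlers, restarts deferred
          *	transmission, and re-enables the queue interrupts once the
          *	backlog is drained.
          */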
   9094 static void
   9095 wm_handle_queue(void *arg)
   9096 {
   9097 	struct wm_queue *wmq = arg;
   9098 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9099 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9100 	struct wm_softc *sc = txq->txq_sc;
   9101 	u_int txlimit = sc->sc_tx_process_limit;
   9102 	u_int rxlimit = sc->sc_rx_process_limit;
   9103 	bool txmore;
   9104 	bool rxmore;
   9105 
   9106 	mutex_enter(txq->txq_lock);
   9107 	if (txq->txq_stopping) {
   9108 		mutex_exit(txq->txq_lock);
   9109 		return;
   9110 	}
   9111 	txmore = wm_txeof(txq, txlimit);
   9112 	wm_deferred_start_locked(txq);
   9113 	mutex_exit(txq->txq_lock);
   9114 
   9115 	mutex_enter(rxq->rxq_lock);
   9116 	if (rxq->rxq_stopping) {
   9117 		mutex_exit(rxq->rxq_lock);
   9118 		return;
   9119 	}
   9120 	WM_Q_EVCNT_INCR(rxq, rxdefer);
   9121 	rxmore = wm_rxeof(rxq, rxlimit);
   9122 	mutex_exit(rxq->rxq_lock);
   9123 
   9124 	if (txmore || rxmore)
   9125 		softint_schedule(wmq->wmq_si);
   9126 	else
   9127 		wm_txrxintr_enable(wmq);
   9128 }
   9129 
   9130 /*
   9131  * wm_linkintr_msix:
   9132  *
   9133  *	Interrupt service routine for link status change for MSI-X.
   9134  */
   9135 static int
   9136 wm_linkintr_msix(void *arg)
   9137 {
   9138 	struct wm_softc *sc = arg;
   9139 	uint32_t reg;
   9140 	bool has_rxo;
   9141 
   9142 	DPRINTF(WM_DEBUG_LINK,
   9143 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9144 
   9145 	reg = CSR_READ(sc, WMREG_ICR);
   9146 	WM_CORE_LOCK(sc);
   9147 	if (sc->sc_core_stopping)
   9148 		goto out;
   9149 
    9150 	if ((reg & ICR_LSC) != 0) {
   9151 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9152 		wm_linkintr(sc, ICR_LSC);
   9153 	}
   9154 
   9155 	/*
   9156 	 * XXX 82574 MSI-X mode workaround
   9157 	 *
    9158 	 * In 82574 MSI-X mode, a receive overrun (RXO) arrives on the
    9159 	 * ICR_OTHER MSI-X vector and triggers neither the ICR_RXQ(0) nor
    9160 	 * the ICR_RXQ(1) vector.  So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9161 	 * interrupts by writing WMREG_ICS to get receive packets processed.
   9162 	 */
   9163 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9164 #if defined(WM_DEBUG)
   9165 		log(LOG_WARNING, "%s: Receive overrun\n",
   9166 		    device_xname(sc->sc_dev));
   9167 #endif /* defined(WM_DEBUG) */
   9168 
   9169 		has_rxo = true;
   9170 		/*
    9171 		 * The RXO interrupt fires at a very high rate when receive
    9172 		 * traffic is heavy, so we handle ICR_OTHER in polling mode,
    9173 		 * just like the Tx/Rx interrupts.  ICR_OTHER is re-enabled
    9174 		 * at the end of wm_txrxintr_msix(), which is kicked by both
    9175 		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9176 		 */
   9177 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9178 
   9179 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9180 	}
    9181 
   9184 out:
   9185 	WM_CORE_UNLOCK(sc);
   9186 
   9187 	if (sc->sc_type == WM_T_82574) {
   9188 		if (!has_rxo)
   9189 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9190 		else
   9191 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9192 	} else if (sc->sc_type == WM_T_82575)
   9193 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9194 	else
   9195 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9196 
   9197 	return 1;
   9198 }
   9199 
   9200 /*
   9201  * Media related.
   9202  * GMII, SGMII, TBI (and SERDES)
   9203  */
   9204 
   9205 /* Common */
   9206 
   9207 /*
   9208  * wm_tbi_serdes_set_linkled:
   9209  *
   9210  *	Update the link LED on TBI and SERDES devices.
   9211  */
   9212 static void
   9213 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9214 {
   9215 
   9216 	if (sc->sc_tbi_linkup)
   9217 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9218 	else
   9219 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9220 
   9221 	/* 82540 or newer devices are active low */
   9222 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9223 
   9224 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9225 }
   9226 
   9227 /* GMII related */
   9228 
   9229 /*
   9230  * wm_gmii_reset:
   9231  *
   9232  *	Reset the PHY.
   9233  */
   9234 static void
   9235 wm_gmii_reset(struct wm_softc *sc)
   9236 {
   9237 	uint32_t reg;
   9238 	int rv;
   9239 
   9240 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9241 		device_xname(sc->sc_dev), __func__));
   9242 
   9243 	rv = sc->phy.acquire(sc);
   9244 	if (rv != 0) {
   9245 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9246 		    __func__);
   9247 		return;
   9248 	}
   9249 
   9250 	switch (sc->sc_type) {
   9251 	case WM_T_82542_2_0:
   9252 	case WM_T_82542_2_1:
   9253 		/* null */
   9254 		break;
   9255 	case WM_T_82543:
   9256 		/*
   9257 		 * With 82543, we need to force speed and duplex on the MAC
   9258 		 * equal to what the PHY speed and duplex configuration is.
   9259 		 * In addition, we need to perform a hardware reset on the PHY
   9260 		 * to take it out of reset.
   9261 		 */
   9262 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9263 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9264 
   9265 		/* The PHY reset pin is active-low. */
   9266 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9267 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9268 		    CTRL_EXT_SWDPIN(4));
   9269 		reg |= CTRL_EXT_SWDPIO(4);
   9270 
   9271 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9272 		CSR_WRITE_FLUSH(sc);
   9273 		delay(10*1000);
   9274 
   9275 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9276 		CSR_WRITE_FLUSH(sc);
   9277 		delay(150);
   9278 #if 0
   9279 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9280 #endif
   9281 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9282 		break;
   9283 	case WM_T_82544:	/* reset 10000us */
   9284 	case WM_T_82540:
   9285 	case WM_T_82545:
   9286 	case WM_T_82545_3:
   9287 	case WM_T_82546:
   9288 	case WM_T_82546_3:
   9289 	case WM_T_82541:
   9290 	case WM_T_82541_2:
   9291 	case WM_T_82547:
   9292 	case WM_T_82547_2:
   9293 	case WM_T_82571:	/* reset 100us */
   9294 	case WM_T_82572:
   9295 	case WM_T_82573:
   9296 	case WM_T_82574:
   9297 	case WM_T_82575:
   9298 	case WM_T_82576:
   9299 	case WM_T_82580:
   9300 	case WM_T_I350:
   9301 	case WM_T_I354:
   9302 	case WM_T_I210:
   9303 	case WM_T_I211:
   9304 	case WM_T_82583:
   9305 	case WM_T_80003:
   9306 		/* generic reset */
   9307 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9308 		CSR_WRITE_FLUSH(sc);
   9309 		delay(20000);
   9310 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9311 		CSR_WRITE_FLUSH(sc);
   9312 		delay(20000);
   9313 
   9314 		if ((sc->sc_type == WM_T_82541)
   9315 		    || (sc->sc_type == WM_T_82541_2)
   9316 		    || (sc->sc_type == WM_T_82547)
   9317 		    || (sc->sc_type == WM_T_82547_2)) {
    9318 			/* Workarounds for IGP are done in igp_reset(). */
   9319 			/* XXX add code to set LED after phy reset */
   9320 		}
   9321 		break;
   9322 	case WM_T_ICH8:
   9323 	case WM_T_ICH9:
   9324 	case WM_T_ICH10:
   9325 	case WM_T_PCH:
   9326 	case WM_T_PCH2:
   9327 	case WM_T_PCH_LPT:
   9328 	case WM_T_PCH_SPT:
   9329 	case WM_T_PCH_CNP:
   9330 		/* generic reset */
   9331 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9332 		CSR_WRITE_FLUSH(sc);
   9333 		delay(100);
   9334 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9335 		CSR_WRITE_FLUSH(sc);
   9336 		delay(150);
   9337 		break;
   9338 	default:
   9339 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9340 		    __func__);
   9341 		break;
   9342 	}
   9343 
   9344 	sc->phy.release(sc);
   9345 
   9346 	/* get_cfg_done */
   9347 	wm_get_cfg_done(sc);
   9348 
   9349 	/* extra setup */
   9350 	switch (sc->sc_type) {
   9351 	case WM_T_82542_2_0:
   9352 	case WM_T_82542_2_1:
   9353 	case WM_T_82543:
   9354 	case WM_T_82544:
   9355 	case WM_T_82540:
   9356 	case WM_T_82545:
   9357 	case WM_T_82545_3:
   9358 	case WM_T_82546:
   9359 	case WM_T_82546_3:
   9360 	case WM_T_82541_2:
   9361 	case WM_T_82547_2:
   9362 	case WM_T_82571:
   9363 	case WM_T_82572:
   9364 	case WM_T_82573:
   9365 	case WM_T_82574:
   9366 	case WM_T_82583:
   9367 	case WM_T_82575:
   9368 	case WM_T_82576:
   9369 	case WM_T_82580:
   9370 	case WM_T_I350:
   9371 	case WM_T_I354:
   9372 	case WM_T_I210:
   9373 	case WM_T_I211:
   9374 	case WM_T_80003:
   9375 		/* null */
   9376 		break;
   9377 	case WM_T_82541:
   9378 	case WM_T_82547:
    9379 		/* XXX Configure the activity LED after PHY reset */
   9380 		break;
   9381 	case WM_T_ICH8:
   9382 	case WM_T_ICH9:
   9383 	case WM_T_ICH10:
   9384 	case WM_T_PCH:
   9385 	case WM_T_PCH2:
   9386 	case WM_T_PCH_LPT:
   9387 	case WM_T_PCH_SPT:
   9388 	case WM_T_PCH_CNP:
   9389 		wm_phy_post_reset(sc);
   9390 		break;
   9391 	default:
   9392 		panic("%s: unknown type\n", __func__);
   9393 		break;
   9394 	}
   9395 }
   9396 
   9397 /*
   9398  * Setup sc_phytype and mii_{read|write}reg.
   9399  *
    9400  *  To identify the PHY type, the correct read/write functions must be
    9401  * selected, and to select them, the PCI ID or MAC type is needed
    9402  * without accessing any PHY registers.
    9403  *
    9404  *  On the first call of this function, the PHY ID is not known yet, so
    9405  * the PCI ID or MAC type is checked.  The list of PCI IDs may not be
    9406  * perfect, so the result might be incorrect.
    9407  *
    9408  *  On the second call, the PHY OUI and model are used to identify the
    9409  * PHY type.  The result might still not be perfect because entries may
    9410  * be missing from the comparison table, but it should be better than
    9411  * the first call.
    9412  *
    9413  *  If the newly detected result differs from the previous assumption,
    9414  * a diagnostic message is printed.
   9414  */
   9415 static void
   9416 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9417     uint16_t phy_model)
   9418 {
   9419 	device_t dev = sc->sc_dev;
   9420 	struct mii_data *mii = &sc->sc_mii;
   9421 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9422 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9423 	mii_readreg_t new_readreg;
   9424 	mii_writereg_t new_writereg;
   9425 
   9426 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9427 		device_xname(sc->sc_dev), __func__));
   9428 
   9429 	if (mii->mii_readreg == NULL) {
   9430 		/*
   9431 		 *  This is the first call of this function. For ICH and PCH
   9432 		 * variants, it's difficult to determine the PHY access method
   9433 		 * by sc_type, so use the PCI product ID for some devices.
   9434 		 */
   9435 
   9436 		switch (sc->sc_pcidevid) {
   9437 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9438 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9439 			/* 82577 */
   9440 			new_phytype = WMPHY_82577;
   9441 			break;
   9442 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9443 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9444 			/* 82578 */
   9445 			new_phytype = WMPHY_82578;
   9446 			break;
   9447 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9448 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9449 			/* 82579 */
   9450 			new_phytype = WMPHY_82579;
   9451 			break;
   9452 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9453 		case PCI_PRODUCT_INTEL_82801I_BM:
   9454 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9455 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9456 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9457 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9458 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9459 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9460 			/* ICH8, 9, 10 with 82567 */
   9461 			new_phytype = WMPHY_BM;
   9462 			break;
   9463 		default:
   9464 			break;
   9465 		}
   9466 	} else {
   9467 		/* It's not the first call. Use PHY OUI and model */
   9468 		switch (phy_oui) {
   9469 		case MII_OUI_ATHEROS: /* XXX ??? */
   9470 			switch (phy_model) {
   9471 			case 0x0004: /* XXX */
   9472 				new_phytype = WMPHY_82578;
   9473 				break;
   9474 			default:
   9475 				break;
   9476 			}
   9477 			break;
   9478 		case MII_OUI_xxMARVELL:
   9479 			switch (phy_model) {
   9480 			case MII_MODEL_xxMARVELL_I210:
   9481 				new_phytype = WMPHY_I210;
   9482 				break;
   9483 			case MII_MODEL_xxMARVELL_E1011:
   9484 			case MII_MODEL_xxMARVELL_E1000_3:
   9485 			case MII_MODEL_xxMARVELL_E1000_5:
   9486 			case MII_MODEL_xxMARVELL_E1112:
   9487 				new_phytype = WMPHY_M88;
   9488 				break;
   9489 			case MII_MODEL_xxMARVELL_E1149:
   9490 				new_phytype = WMPHY_BM;
   9491 				break;
   9492 			case MII_MODEL_xxMARVELL_E1111:
   9493 			case MII_MODEL_xxMARVELL_I347:
   9494 			case MII_MODEL_xxMARVELL_E1512:
   9495 			case MII_MODEL_xxMARVELL_E1340M:
   9496 			case MII_MODEL_xxMARVELL_E1543:
   9497 				new_phytype = WMPHY_M88;
   9498 				break;
   9499 			case MII_MODEL_xxMARVELL_I82563:
   9500 				new_phytype = WMPHY_GG82563;
   9501 				break;
   9502 			default:
   9503 				break;
   9504 			}
   9505 			break;
   9506 		case MII_OUI_INTEL:
   9507 			switch (phy_model) {
   9508 			case MII_MODEL_INTEL_I82577:
   9509 				new_phytype = WMPHY_82577;
   9510 				break;
   9511 			case MII_MODEL_INTEL_I82579:
   9512 				new_phytype = WMPHY_82579;
   9513 				break;
   9514 			case MII_MODEL_INTEL_I217:
   9515 				new_phytype = WMPHY_I217;
   9516 				break;
   9517 			case MII_MODEL_INTEL_I82580:
   9518 			case MII_MODEL_INTEL_I350:
   9519 				new_phytype = WMPHY_82580;
   9520 				break;
   9521 			default:
   9522 				break;
   9523 			}
   9524 			break;
   9525 		case MII_OUI_yyINTEL:
   9526 			switch (phy_model) {
   9527 			case MII_MODEL_yyINTEL_I82562G:
   9528 			case MII_MODEL_yyINTEL_I82562EM:
   9529 			case MII_MODEL_yyINTEL_I82562ET:
   9530 				new_phytype = WMPHY_IFE;
   9531 				break;
   9532 			case MII_MODEL_yyINTEL_IGP01E1000:
   9533 				new_phytype = WMPHY_IGP;
   9534 				break;
   9535 			case MII_MODEL_yyINTEL_I82566:
   9536 				new_phytype = WMPHY_IGP_3;
   9537 				break;
   9538 			default:
   9539 				break;
   9540 			}
   9541 			break;
   9542 		default:
   9543 			break;
   9544 		}
   9545 		if (new_phytype == WMPHY_UNKNOWN)
   9546 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9547 			    __func__);
   9548 
   9549 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9550 		    && (sc->sc_phytype != new_phytype)) {
    9551 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9552 			    "was incorrect. PHY type from PHY ID = %u\n",
   9553 			    sc->sc_phytype, new_phytype);
   9554 		}
   9555 	}
   9556 
   9557 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9558 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9559 		/* SGMII */
   9560 		new_readreg = wm_sgmii_readreg;
   9561 		new_writereg = wm_sgmii_writereg;
    9562 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9563 		/* BM2 (phyaddr == 1) */
   9564 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9565 		    && (new_phytype != WMPHY_BM)
   9566 		    && (new_phytype != WMPHY_UNKNOWN))
   9567 			doubt_phytype = new_phytype;
   9568 		new_phytype = WMPHY_BM;
   9569 		new_readreg = wm_gmii_bm_readreg;
   9570 		new_writereg = wm_gmii_bm_writereg;
   9571 	} else if (sc->sc_type >= WM_T_PCH) {
   9572 		/* All PCH* use _hv_ */
   9573 		new_readreg = wm_gmii_hv_readreg;
   9574 		new_writereg = wm_gmii_hv_writereg;
   9575 	} else if (sc->sc_type >= WM_T_ICH8) {
   9576 		/* non-82567 ICH8, 9 and 10 */
   9577 		new_readreg = wm_gmii_i82544_readreg;
   9578 		new_writereg = wm_gmii_i82544_writereg;
   9579 	} else if (sc->sc_type >= WM_T_80003) {
   9580 		/* 80003 */
   9581 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9582 		    && (new_phytype != WMPHY_GG82563)
   9583 		    && (new_phytype != WMPHY_UNKNOWN))
   9584 			doubt_phytype = new_phytype;
   9585 		new_phytype = WMPHY_GG82563;
   9586 		new_readreg = wm_gmii_i80003_readreg;
   9587 		new_writereg = wm_gmii_i80003_writereg;
   9588 	} else if (sc->sc_type >= WM_T_I210) {
   9589 		/* I210 and I211 */
   9590 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9591 		    && (new_phytype != WMPHY_I210)
   9592 		    && (new_phytype != WMPHY_UNKNOWN))
   9593 			doubt_phytype = new_phytype;
   9594 		new_phytype = WMPHY_I210;
   9595 		new_readreg = wm_gmii_gs40g_readreg;
   9596 		new_writereg = wm_gmii_gs40g_writereg;
   9597 	} else if (sc->sc_type >= WM_T_82580) {
   9598 		/* 82580, I350 and I354 */
   9599 		new_readreg = wm_gmii_82580_readreg;
   9600 		new_writereg = wm_gmii_82580_writereg;
   9601 	} else if (sc->sc_type >= WM_T_82544) {
    9602 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9603 		new_readreg = wm_gmii_i82544_readreg;
   9604 		new_writereg = wm_gmii_i82544_writereg;
   9605 	} else {
   9606 		new_readreg = wm_gmii_i82543_readreg;
   9607 		new_writereg = wm_gmii_i82543_writereg;
   9608 	}
   9609 
   9610 	if (new_phytype == WMPHY_BM) {
   9611 		/* All BM use _bm_ */
   9612 		new_readreg = wm_gmii_bm_readreg;
   9613 		new_writereg = wm_gmii_bm_writereg;
   9614 	}
   9615 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9616 		/* All PCH* use _hv_ */
   9617 		new_readreg = wm_gmii_hv_readreg;
   9618 		new_writereg = wm_gmii_hv_writereg;
   9619 	}
   9620 
   9621 	/* Diag output */
   9622 	if (doubt_phytype != WMPHY_UNKNOWN)
   9623 		aprint_error_dev(dev, "Assumed new PHY type was "
   9624 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9625 		    new_phytype);
   9626 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9627 	    && (sc->sc_phytype != new_phytype))
    9628 		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
   9629 		    "was incorrect. New PHY type = %u\n",
   9630 		    sc->sc_phytype, new_phytype);
   9631 
   9632 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9633 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9634 
   9635 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9636 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9637 		    "function was incorrect.\n");
   9638 
   9639 	/* Update now */
   9640 	sc->sc_phytype = new_phytype;
   9641 	mii->mii_readreg = new_readreg;
   9642 	mii->mii_writereg = new_writereg;
   9643 }
   9644 
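/*
 * Note: wm_gmii_setup_phytype() above is written to be called twice:
 * once during attach, before any PHY has been probed, where the PHY
 * type is guessed from the MAC type and the PCI product ID, and once
 * after mii_attach() has found a PHY, where the type is re-derived
 * from the PHY's own OUI and model and any mismatch with the earlier
 * guess is reported.
 */
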
   9645 /*
   9646  * wm_get_phy_id_82575:
   9647  *
    9648  * Return the PHY ID, or -1 on failure.
   9649  */
   9650 static int
   9651 wm_get_phy_id_82575(struct wm_softc *sc)
   9652 {
   9653 	uint32_t reg;
   9654 	int phyid = -1;
   9655 
   9656 	/* XXX */
   9657 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9658 		return -1;
   9659 
   9660 	if (wm_sgmii_uses_mdio(sc)) {
   9661 		switch (sc->sc_type) {
   9662 		case WM_T_82575:
   9663 		case WM_T_82576:
   9664 			reg = CSR_READ(sc, WMREG_MDIC);
   9665 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9666 			break;
   9667 		case WM_T_82580:
   9668 		case WM_T_I350:
   9669 		case WM_T_I354:
   9670 		case WM_T_I210:
   9671 		case WM_T_I211:
   9672 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9673 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9674 			break;
   9675 		default:
   9676 			return -1;
   9677 		}
   9678 	}
   9679 
   9680 	return phyid;
   9681 }
   9682 
   9683 
   9684 /*
   9685  * wm_gmii_mediainit:
   9686  *
   9687  *	Initialize media for use on 1000BASE-T devices.
   9688  */
   9689 static void
   9690 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9691 {
   9692 	device_t dev = sc->sc_dev;
   9693 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9694 	struct mii_data *mii = &sc->sc_mii;
   9695 	uint32_t reg;
   9696 
   9697 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9698 		device_xname(sc->sc_dev), __func__));
   9699 
   9700 	/* We have GMII. */
   9701 	sc->sc_flags |= WM_F_HAS_MII;
   9702 
   9703 	if (sc->sc_type == WM_T_80003)
   9704 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9705 	else
   9706 		sc->sc_tipg = TIPG_1000T_DFLT;
   9707 
   9708 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9709 	if ((sc->sc_type == WM_T_82580)
   9710 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9711 	    || (sc->sc_type == WM_T_I211)) {
   9712 		reg = CSR_READ(sc, WMREG_PHPM);
   9713 		reg &= ~PHPM_GO_LINK_D;
   9714 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9715 	}
   9716 
   9717 	/*
   9718 	 * Let the chip set speed/duplex on its own based on
   9719 	 * signals from the PHY.
   9720 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9721 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9722 	 */
   9723 	sc->sc_ctrl |= CTRL_SLU;
   9724 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9725 
   9726 	/* Initialize our media structures and probe the GMII. */
   9727 	mii->mii_ifp = ifp;
   9728 
   9729 	mii->mii_statchg = wm_gmii_statchg;
   9730 
    9731 	/* Switch PHY control from SMBus to PCIe */
   9732 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9733 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9734 	    || (sc->sc_type == WM_T_PCH_CNP))
   9735 		wm_smbustopci(sc);
   9736 
   9737 	wm_gmii_reset(sc);
   9738 
   9739 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9740 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9741 	    wm_gmii_mediastatus);
   9742 
   9743 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9744 	    || (sc->sc_type == WM_T_82580)
   9745 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9746 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9747 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9748 			/* Attach only one port */
   9749 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9750 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9751 		} else {
   9752 			int i, id;
   9753 			uint32_t ctrl_ext;
   9754 
   9755 			id = wm_get_phy_id_82575(sc);
   9756 			if (id != -1) {
   9757 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9758 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9759 			}
   9760 			if ((id == -1)
   9761 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9762 				/* Power on the SGMII PHY if it is disabled */
   9763 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9764 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9765 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9766 				CSR_WRITE_FLUSH(sc);
   9767 				delay(300*1000); /* XXX too long */
   9768 
    9769 				/* Try PHY addresses from 1 to 7 */
   9770 				for (i = 1; i < 8; i++)
   9771 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9772 					    0xffffffff, i, MII_OFFSET_ANY,
   9773 					    MIIF_DOPAUSE);
   9774 
    9775 				/* Restore the previous SFP cage power state */
   9776 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9777 			}
   9778 		}
   9779 	} else {
   9780 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9781 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9782 	}
   9783 
   9784 	/*
    9785 	 * If the MAC is PCH2 or newer and failed to detect a MII PHY, call
   9786 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9787 	 */
   9788 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9789 		|| (sc->sc_type == WM_T_PCH_SPT)
   9790 		|| (sc->sc_type == WM_T_PCH_CNP))
   9791 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9792 		wm_set_mdio_slow_mode_hv(sc);
   9793 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9794 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9795 	}
   9796 
   9797 	/*
   9798 	 * (For ICH8 variants)
   9799 	 * If PHY detection failed, use BM's r/w function and retry.
   9800 	 */
   9801 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9802 		/* if failed, retry with *_bm_* */
   9803 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9804 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9805 		    sc->sc_phytype);
   9806 		sc->sc_phytype = WMPHY_BM;
   9807 		mii->mii_readreg = wm_gmii_bm_readreg;
   9808 		mii->mii_writereg = wm_gmii_bm_writereg;
   9809 
   9810 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9811 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9812 	}
   9813 
   9814 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9815 		/* No PHY was found */
   9816 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9817 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9818 		sc->sc_phytype = WMPHY_NONE;
   9819 	} else {
   9820 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9821 
   9822 		/*
    9823 		 * A PHY was found. Check the PHY type again with the second
    9824 		 * call of wm_gmii_setup_phytype().
   9825 		 */
   9826 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9827 		    child->mii_mpd_model);
   9828 
   9829 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9830 	}
   9831 }
   9832 
   9833 /*
   9834  * wm_gmii_mediachange:	[ifmedia interface function]
   9835  *
   9836  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9837  */
   9838 static int
   9839 wm_gmii_mediachange(struct ifnet *ifp)
   9840 {
   9841 	struct wm_softc *sc = ifp->if_softc;
   9842 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9843 	int rc;
   9844 
   9845 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9846 		device_xname(sc->sc_dev), __func__));
   9847 	if ((ifp->if_flags & IFF_UP) == 0)
   9848 		return 0;
   9849 
   9850 	/* Disable D0 LPLU. */
   9851 	wm_lplu_d0_disable(sc);
   9852 
   9853 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9854 	sc->sc_ctrl |= CTRL_SLU;
   9855 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9856 	    || (sc->sc_type > WM_T_82543)) {
   9857 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9858 	} else {
   9859 		sc->sc_ctrl &= ~CTRL_ASDE;
   9860 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9861 		if (ife->ifm_media & IFM_FDX)
   9862 			sc->sc_ctrl |= CTRL_FD;
   9863 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9864 		case IFM_10_T:
   9865 			sc->sc_ctrl |= CTRL_SPEED_10;
   9866 			break;
   9867 		case IFM_100_TX:
   9868 			sc->sc_ctrl |= CTRL_SPEED_100;
   9869 			break;
   9870 		case IFM_1000_T:
   9871 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9872 			break;
   9873 		default:
   9874 			panic("wm_gmii_mediachange: bad media 0x%x",
   9875 			    ife->ifm_media);
   9876 		}
   9877 	}
   9878 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9879 	CSR_WRITE_FLUSH(sc);
   9880 	if (sc->sc_type <= WM_T_82543)
   9881 		wm_gmii_reset(sc);
   9882 
   9883 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9884 		return 0;
   9885 	return rc;
   9886 }
   9887 
   9888 /*
   9889  * wm_gmii_mediastatus:	[ifmedia interface function]
   9890  *
   9891  *	Get the current interface media status on a 1000BASE-T device.
   9892  */
   9893 static void
   9894 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9895 {
   9896 	struct wm_softc *sc = ifp->if_softc;
   9897 
   9898 	ether_mediastatus(ifp, ifmr);
   9899 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9900 	    | sc->sc_flowflags;
   9901 }
   9902 
   9903 #define	MDI_IO		CTRL_SWDPIN(2)
   9904 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9905 #define	MDI_CLK		CTRL_SWDPIN(3)
   9906 
   9907 static void
   9908 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9909 {
   9910 	uint32_t i, v;
   9911 
   9912 	v = CSR_READ(sc, WMREG_CTRL);
   9913 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9914 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9915 
   9916 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9917 		if (data & i)
   9918 			v |= MDI_IO;
   9919 		else
   9920 			v &= ~MDI_IO;
   9921 		CSR_WRITE(sc, WMREG_CTRL, v);
   9922 		CSR_WRITE_FLUSH(sc);
   9923 		delay(10);
   9924 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9925 		CSR_WRITE_FLUSH(sc);
   9926 		delay(10);
   9927 		CSR_WRITE(sc, WMREG_CTRL, v);
   9928 		CSR_WRITE_FLUSH(sc);
   9929 		delay(10);
   9930 	}
   9931 }
   9932 
   9933 static uint32_t
   9934 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9935 {
   9936 	uint32_t v, i, data = 0;
   9937 
   9938 	v = CSR_READ(sc, WMREG_CTRL);
   9939 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9940 	v |= CTRL_SWDPIO(3);
   9941 
   9942 	CSR_WRITE(sc, WMREG_CTRL, v);
   9943 	CSR_WRITE_FLUSH(sc);
   9944 	delay(10);
   9945 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9946 	CSR_WRITE_FLUSH(sc);
   9947 	delay(10);
   9948 	CSR_WRITE(sc, WMREG_CTRL, v);
   9949 	CSR_WRITE_FLUSH(sc);
   9950 	delay(10);
   9951 
   9952 	for (i = 0; i < 16; i++) {
   9953 		data <<= 1;
   9954 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9955 		CSR_WRITE_FLUSH(sc);
   9956 		delay(10);
   9957 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9958 			data |= 1;
   9959 		CSR_WRITE(sc, WMREG_CTRL, v);
   9960 		CSR_WRITE_FLUSH(sc);
   9961 		delay(10);
   9962 	}
   9963 
   9964 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9965 	CSR_WRITE_FLUSH(sc);
   9966 	delay(10);
   9967 	CSR_WRITE(sc, WMREG_CTRL, v);
   9968 	CSR_WRITE_FLUSH(sc);
   9969 	delay(10);
   9970 
   9971 	return data;
   9972 }
   9973 
   9974 #undef MDI_IO
   9975 #undef MDI_DIR
   9976 #undef MDI_CLK
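
/*
 * Note: the two helpers above bit-bang IEEE 802.3 clause 22 MDIO frames
 * over the software-definable pins.  For example, a read of PHY 1,
 * register 2 (PHY ID1) sends a 32-bit preamble of all ones followed by
 * the 14-bit header
 *
 *	<01><10><00001><00010>
 *	 ST  OP  PHYAD  REGAD
 *
 * after which wm_i82543_mii_recvbits() clocks in the turnaround and the
 * 16 data bits, as wm_gmii_i82543_readreg() below does.
 */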
   9977 
   9978 /*
   9979  * wm_gmii_i82543_readreg:	[mii interface function]
   9980  *
   9981  *	Read a PHY register on the GMII (i82543 version).
   9982  */
   9983 static int
   9984 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9985 {
   9986 	struct wm_softc *sc = device_private(dev);
   9987 	int rv;
   9988 
   9989 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9990 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9991 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9992 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9993 
   9994 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9995 	    device_xname(dev), phy, reg, rv));
   9996 
   9997 	return rv;
   9998 }
   9999 
   10000 /*
   10001  * wm_gmii_i82543_writereg:	[mii interface function]
   10002  *
   10003  *	Write a PHY register on the GMII (i82543 version).
   10004  */
   10005 static void
   10006 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10007 {
   10008 	struct wm_softc *sc = device_private(dev);
   10009 
   10010 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10011 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10012 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10013 	    (MII_COMMAND_START << 30), 32);
   10014 }
   10015 
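/*
 * Note: on the 82544 and newer the MDIO frame above is driven by
 * hardware instead.  The two helpers below write a single MDIC command
 * word (opcode, PHY address, register address and, for writes, the
 * data) and then poll for MDIC_READY, making up to
 * WM_GEN_POLL_TIMEOUT * 3 polls with a 50us delay between each.
 */
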
   10016 /*
   10017  * wm_gmii_mdic_readreg:	[mii interface function]
   10018  *
   10019  *	Read a PHY register on the GMII.
   10020  */
   10021 static int
   10022 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10023 {
   10024 	struct wm_softc *sc = device_private(dev);
   10025 	uint32_t mdic = 0;
   10026 	int i, rv;
   10027 
   10028 	if (reg > MII_ADDRMASK) {
   10029 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10030 		    __func__, sc->sc_phytype, reg);
   10031 		reg &= MII_ADDRMASK;
   10032 	}
   10033 
   10034 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10035 	    MDIC_REGADD(reg));
   10036 
   10037 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10038 		mdic = CSR_READ(sc, WMREG_MDIC);
   10039 		if (mdic & MDIC_READY)
   10040 			break;
   10041 		delay(50);
   10042 	}
   10043 
   10044 	if ((mdic & MDIC_READY) == 0) {
   10045 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10046 		    device_xname(dev), phy, reg);
   10047 		rv = 0;
   10048 	} else if (mdic & MDIC_E) {
   10049 #if 0 /* This is normal if no PHY is present. */
   10050 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10051 		    device_xname(dev), phy, reg);
   10052 #endif
   10053 		rv = 0;
   10054 	} else {
   10055 		rv = MDIC_DATA(mdic);
   10056 		if (rv == 0xffff)
   10057 			rv = 0;
   10058 	}
   10059 
   10060 	return rv;
   10061 }
   10062 
   10063 /*
   10064  * wm_gmii_mdic_writereg:	[mii interface function]
   10065  *
   10066  *	Write a PHY register on the GMII.
   10067  */
   10068 static void
   10069 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10070 {
   10071 	struct wm_softc *sc = device_private(dev);
   10072 	uint32_t mdic = 0;
   10073 	int i;
   10074 
   10075 	if (reg > MII_ADDRMASK) {
   10076 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10077 		    __func__, sc->sc_phytype, reg);
   10078 		reg &= MII_ADDRMASK;
   10079 	}
   10080 
   10081 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10082 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10083 
   10084 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10085 		mdic = CSR_READ(sc, WMREG_MDIC);
   10086 		if (mdic & MDIC_READY)
   10087 			break;
   10088 		delay(50);
   10089 	}
   10090 
   10091 	if ((mdic & MDIC_READY) == 0)
   10092 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10093 		    device_xname(dev), phy, reg);
   10094 	else if (mdic & MDIC_E)
   10095 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10096 		    device_xname(dev), phy, reg);
   10097 }
   10098 
   10099 /*
   10100  * wm_gmii_i82544_readreg:	[mii interface function]
   10101  *
   10102  *	Read a PHY register on the GMII.
   10103  */
   10104 static int
   10105 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10106 {
   10107 	struct wm_softc *sc = device_private(dev);
   10108 	int rv;
   10109 
   10110 	if (sc->phy.acquire(sc)) {
   10111 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10112 		return 0;
   10113 	}
   10114 
   10115 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10116 		switch (sc->sc_phytype) {
   10117 		case WMPHY_IGP:
   10118 		case WMPHY_IGP_2:
   10119 		case WMPHY_IGP_3:
   10120 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10121 			    reg);
   10122 			break;
   10123 		default:
   10124 #ifdef WM_DEBUG
   10125 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10126 			    __func__, sc->sc_phytype, reg);
   10127 #endif
   10128 			break;
   10129 		}
   10130 	}
   10131 
   10132 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10133 	sc->phy.release(sc);
   10134 
   10135 	return rv;
   10136 }
   10137 
   10138 /*
   10139  * wm_gmii_i82544_writereg:	[mii interface function]
   10140  *
   10141  *	Write a PHY register on the GMII.
   10142  */
   10143 static void
   10144 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10145 {
   10146 	struct wm_softc *sc = device_private(dev);
   10147 
   10148 	if (sc->phy.acquire(sc)) {
   10149 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10150 		return;
   10151 	}
   10152 
   10153 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10154 		switch (sc->sc_phytype) {
   10155 		case WMPHY_IGP:
   10156 		case WMPHY_IGP_2:
   10157 		case WMPHY_IGP_3:
   10158 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10159 			    reg);
   10160 			break;
   10161 		default:
   10162 #ifdef WM_DEBUG
    10163 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10164 			    __func__, sc->sc_phytype, reg);
   10165 #endif
   10166 			break;
   10167 		}
   10168 	}
   10169 
   10170 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10171 	sc->phy.release(sc);
   10172 }
   10173 
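/*
 * Note: the GG82563 PHY behind the 80003 is paged.  The page number is
 * carried in the upper bits of 'reg' (reg >> GG82563_PAGE_SHIFT) and is
 * written to a page-select register first.  Registers 30 and 31 of a
 * page are only reachable via the alternative page-select register,
 * which is why the helpers below choose between GG82563_PHY_PAGE_SELECT
 * and GG82563_PHY_PAGE_SELECT_ALT.
 */
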
   10174 /*
   10175  * wm_gmii_i80003_readreg:	[mii interface function]
   10176  *
   10177  *	Read a PHY register on the kumeran
    10178  *	Read a PHY register on the Kumeran bus.
    10179  * This could be handled by the PHY layer if we didn't have to lock the
    10180  * resource ...
   10181 static int
   10182 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10183 {
   10184 	struct wm_softc *sc = device_private(dev);
   10185 	int page_select, temp;
   10186 	int rv;
   10187 
    10188 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   10189 		return 0;
   10190 
   10191 	if (sc->phy.acquire(sc)) {
   10192 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10193 		return 0;
   10194 	}
   10195 
   10196 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10197 		page_select = GG82563_PHY_PAGE_SELECT;
   10198 	else {
   10199 		/*
   10200 		 * Use Alternative Page Select register to access registers
   10201 		 * 30 and 31.
   10202 		 */
   10203 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10204 	}
   10205 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10206 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10207 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10208 		/*
    10209 		 * Wait another 200us to work around a bug in the ready bit
    10210 		 * of the MDIC register.
   10211 		 */
   10212 		delay(200);
   10213 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10214 			device_printf(dev, "%s failed\n", __func__);
   10215 			rv = 0; /* XXX */
   10216 			goto out;
   10217 		}
   10218 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10219 		delay(200);
   10220 	} else
   10221 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10222 
   10223 out:
   10224 	sc->phy.release(sc);
   10225 	return rv;
   10226 }
   10227 
   10228 /*
   10229  * wm_gmii_i80003_writereg:	[mii interface function]
   10230  *
    10231  *	Write a PHY register on the Kumeran bus.
    10232  * This could be handled by the PHY layer if we didn't have to lock the
    10233  * resource ...
   10234  */
   10235 static void
   10236 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10237 {
   10238 	struct wm_softc *sc = device_private(dev);
   10239 	int page_select, temp;
   10240 
    10241 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   10242 		return;
   10243 
   10244 	if (sc->phy.acquire(sc)) {
   10245 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10246 		return;
   10247 	}
   10248 
   10249 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10250 		page_select = GG82563_PHY_PAGE_SELECT;
   10251 	else {
   10252 		/*
   10253 		 * Use Alternative Page Select register to access registers
   10254 		 * 30 and 31.
   10255 		 */
   10256 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10257 	}
   10258 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10259 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10260 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10261 		/*
    10262 		 * Wait another 200us to work around a bug in the ready bit
    10263 		 * of the MDIC register.
   10264 		 */
   10265 		delay(200);
   10266 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10267 			device_printf(dev, "%s failed\n", __func__);
   10268 			goto out;
   10269 		}
   10270 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10271 		delay(200);
   10272 	} else
   10273 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10274 
   10275 out:
   10276 	sc->phy.release(sc);
   10277 }
   10278 
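/*
 * Note: for the BM PHYs the page number is encoded in 'reg' above
 * BME1000_PAGE_SHIFT, i.e. callers pass (page << BME1000_PAGE_SHIFT) |
 * regnum.  Page 800 (BM_WUC_PAGE) holds the wakeup registers and is
 * routed to wm_access_phy_wakeup_reg_bm() instead of the normal
 * page-select sequence.
 */
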
   10279 /*
   10280  * wm_gmii_bm_readreg:	[mii interface function]
   10281  *
    10282  *	Read a PHY register on the BM PHY.
    10283  * This could be handled by the PHY layer if we didn't have to lock the
    10284  * resource ...
   10285  */
   10286 static int
   10287 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10288 {
   10289 	struct wm_softc *sc = device_private(dev);
   10290 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10291 	uint16_t val;
   10292 	int rv;
   10293 
   10294 	if (sc->phy.acquire(sc)) {
   10295 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10296 		return 0;
   10297 	}
   10298 
   10299 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10300 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10301 		    || (reg == 31)) ? 1 : phy;
   10302 	/* Page 800 works differently than the rest so it has its own func */
   10303 	if (page == BM_WUC_PAGE) {
   10304 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10305 		rv = val;
   10306 		goto release;
   10307 	}
   10308 
   10309 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10310 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10311 		    && (sc->sc_type != WM_T_82583))
   10312 			wm_gmii_mdic_writereg(dev, phy,
   10313 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10314 		else
   10315 			wm_gmii_mdic_writereg(dev, phy,
   10316 			    BME1000_PHY_PAGE_SELECT, page);
   10317 	}
   10318 
   10319 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10320 
   10321 release:
   10322 	sc->phy.release(sc);
   10323 	return rv;
   10324 }
   10325 
   10326 /*
   10327  * wm_gmii_bm_writereg:	[mii interface function]
   10328  *
    10329  *	Write a PHY register on the BM PHY.
    10330  * This could be handled by the PHY layer if we didn't have to lock the
    10331  * resource ...
   10332  */
   10333 static void
   10334 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10335 {
   10336 	struct wm_softc *sc = device_private(dev);
   10337 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10338 
   10339 	if (sc->phy.acquire(sc)) {
   10340 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10341 		return;
   10342 	}
   10343 
   10344 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10345 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10346 		    || (reg == 31)) ? 1 : phy;
   10347 	/* Page 800 works differently than the rest so it has its own func */
   10348 	if (page == BM_WUC_PAGE) {
   10349 		uint16_t tmp;
   10350 
   10351 		tmp = val;
   10352 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10353 		goto release;
   10354 	}
   10355 
   10356 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10357 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10358 		    && (sc->sc_type != WM_T_82583))
   10359 			wm_gmii_mdic_writereg(dev, phy,
   10360 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10361 		else
   10362 			wm_gmii_mdic_writereg(dev, phy,
   10363 			    BME1000_PHY_PAGE_SELECT, page);
   10364 	}
   10365 
   10366 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10367 
   10368 release:
   10369 	sc->phy.release(sc);
   10370 }
   10371 
   10372 static void
   10373 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10374 {
   10375 	struct wm_softc *sc = device_private(dev);
   10376 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10377 	uint16_t wuce, reg;
   10378 
   10379 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10380 		device_xname(dev), __func__));
   10381 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10382 	if (sc->sc_type == WM_T_PCH) {
    10383 		/* XXX The e1000 driver does nothing here... why? */
   10384 	}
   10385 
   10386 	/*
   10387 	 * 1) Enable PHY wakeup register first.
   10388 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10389 	 */
   10390 
   10391 	/* Set page 769 */
   10392 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10393 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10394 
   10395 	/* Read WUCE and save it */
   10396 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10397 
   10398 	reg = wuce | BM_WUC_ENABLE_BIT;
   10399 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10400 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10401 
   10402 	/* Select page 800 */
   10403 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10404 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10405 
   10406 	/*
   10407 	 * 2) Access PHY wakeup register.
   10408 	 * See e1000_access_phy_wakeup_reg_bm.
   10409 	 */
   10410 
    10411 	/* Write the page-800 register address */
   10412 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10413 
   10414 	if (rd)
   10415 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10416 	else
   10417 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10418 
   10419 	/*
   10420 	 * 3) Disable PHY wakeup register.
   10421 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10422 	 */
   10423 	/* Set page 769 */
   10424 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10425 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10426 
   10427 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10428 }
   10429 
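/*
 * Note: HV (PCH) PHY register numbers carry their page in the upper
 * bits; BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() split them apart.  Pages
 * at or above HV_INTC_FC_PAGE_START (768) are always accessed at PHY
 * address 1, which is why the locked helpers below rewrite 'phy' before
 * issuing the MDIC access.
 */
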
   10430 /*
   10431  * wm_gmii_hv_readreg:	[mii interface function]
   10432  *
    10433  *	Read a PHY register on the HV (PCH and newer) PHY.
    10434  * This could be handled by the PHY layer if we didn't have to lock the
    10435  * resource ...
   10436  */
   10437 static int
   10438 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10439 {
   10440 	struct wm_softc *sc = device_private(dev);
   10441 	int rv;
   10442 
   10443 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10444 		device_xname(dev), __func__));
   10445 	if (sc->phy.acquire(sc)) {
   10446 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10447 		return 0;
   10448 	}
   10449 
   10450 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10451 	sc->phy.release(sc);
   10452 	return rv;
   10453 }
   10454 
   10455 static int
   10456 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10457 {
   10458 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10459 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10460 	uint16_t val;
   10461 	int rv;
   10462 
   10463 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10464 
   10465 	/* Page 800 works differently than the rest so it has its own func */
   10466 	if (page == BM_WUC_PAGE) {
   10467 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10468 		return val;
   10469 	}
   10470 
   10471 	/*
    10472 	 * Pages below 768 work differently than the rest and are not
    10473 	 * handled here
   10474 	 */
   10475 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10476 		printf("gmii_hv_readreg!!!\n");
   10477 		return 0;
   10478 	}
   10479 
   10480 	/*
   10481 	 * XXX I21[789] documents say that the SMBus Address register is at
   10482 	 * PHY address 01, Page 0 (not 768), Register 26.
   10483 	 */
   10484 	if (page == HV_INTC_FC_PAGE_START)
   10485 		page = 0;
   10486 
   10487 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10488 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10489 		    page << BME1000_PAGE_SHIFT);
   10490 	}
   10491 
   10492 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10493 	return rv;
   10494 }
   10495 
   10496 /*
   10497  * wm_gmii_hv_writereg:	[mii interface function]
   10498  *
    10499  *	Write a PHY register on the HV (PCH and newer) PHY.
    10500  * This could be handled by the PHY layer if we didn't have to lock the
    10501  * resource ...
   10502  */
   10503 static void
   10504 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10505 {
   10506 	struct wm_softc *sc = device_private(dev);
   10507 
   10508 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10509 		device_xname(dev), __func__));
   10510 
   10511 	if (sc->phy.acquire(sc)) {
   10512 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10513 		return;
   10514 	}
   10515 
   10516 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10517 	sc->phy.release(sc);
   10518 }
   10519 
   10520 static void
   10521 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10522 {
   10523 	struct wm_softc *sc = device_private(dev);
   10524 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10525 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10526 
   10527 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10528 
   10529 	/* Page 800 works differently than the rest so it has its own func */
   10530 	if (page == BM_WUC_PAGE) {
   10531 		uint16_t tmp;
   10532 
   10533 		tmp = val;
   10534 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10535 		return;
   10536 	}
   10537 
   10538 	/*
    10539 	 * Pages below 768 work differently than the rest and are not
    10540 	 * handled here
   10541 	 */
   10542 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10543 		printf("gmii_hv_writereg!!!\n");
   10544 		return;
   10545 	}
   10546 
   10547 	{
   10548 		/*
   10549 		 * XXX I21[789] documents say that the SMBus Address register
   10550 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10551 		 */
   10552 		if (page == HV_INTC_FC_PAGE_START)
   10553 			page = 0;
   10554 
   10555 		/*
   10556 		 * XXX Workaround MDIO accesses being disabled after entering
   10557 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10558 		 * register is set)
   10559 		 */
   10560 		if (sc->sc_phytype == WMPHY_82578) {
   10561 			struct mii_softc *child;
   10562 
   10563 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10564 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10565 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10566 			    && ((val & (1 << 11)) != 0)) {
   10567 				printf("XXX need workaround\n");
   10568 			}
   10569 		}
   10570 
   10571 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10572 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10573 			    page << BME1000_PAGE_SHIFT);
   10574 		}
   10575 	}
   10576 
   10577 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10578 }
   10579 
   10580 /*
   10581  * wm_gmii_82580_readreg:	[mii interface function]
   10582  *
   10583  *	Read a PHY register on the 82580 and I350.
   10584  * This could be handled by the PHY layer if we didn't have to lock the
    10585  * resource ...
   10586  */
   10587 static int
   10588 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10589 {
   10590 	struct wm_softc *sc = device_private(dev);
   10591 	int rv;
   10592 
   10593 	if (sc->phy.acquire(sc) != 0) {
   10594 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10595 		return 0;
   10596 	}
   10597 
   10598 #ifdef DIAGNOSTIC
   10599 	if (reg > MII_ADDRMASK) {
   10600 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10601 		    __func__, sc->sc_phytype, reg);
   10602 		reg &= MII_ADDRMASK;
   10603 	}
   10604 #endif
   10605 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10606 
   10607 	sc->phy.release(sc);
   10608 	return rv;
   10609 }
   10610 
   10611 /*
   10612  * wm_gmii_82580_writereg:	[mii interface function]
   10613  *
   10614  *	Write a PHY register on the 82580 and I350.
   10615  * This could be handled by the PHY layer if we didn't have to lock the
    10616  * resource ...
   10617  */
   10618 static void
   10619 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10620 {
   10621 	struct wm_softc *sc = device_private(dev);
   10622 
   10623 	if (sc->phy.acquire(sc) != 0) {
   10624 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10625 		return;
   10626 	}
   10627 
   10628 #ifdef DIAGNOSTIC
   10629 	if (reg > MII_ADDRMASK) {
   10630 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10631 		    __func__, sc->sc_phytype, reg);
   10632 		reg &= MII_ADDRMASK;
   10633 	}
   10634 #endif
   10635 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10636 
   10637 	sc->phy.release(sc);
   10638 }
   10639 
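/*
 * Note: the internal GS40G PHY of the I210/I211 is paged as well, but
 * with a plain page-select register: the page is taken from
 * reg >> GS40G_PAGE_SHIFT, written to GS40G_PAGE_SELECT, and the access
 * is then made at reg & GS40G_OFFSET_MASK, as the two helpers below do.
 */
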
   10640 /*
   10641  * wm_gmii_gs40g_readreg:	[mii interface function]
   10642  *
    10643  *	Read a PHY register on the I210 and I211.
    10644  * This could be handled by the PHY layer if we didn't have to lock the
    10645  * resource ...
   10646  */
   10647 static int
   10648 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10649 {
   10650 	struct wm_softc *sc = device_private(dev);
   10651 	int page, offset;
   10652 	int rv;
   10653 
   10654 	/* Acquire semaphore */
   10655 	if (sc->phy.acquire(sc)) {
   10656 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10657 		return 0;
   10658 	}
   10659 
   10660 	/* Page select */
   10661 	page = reg >> GS40G_PAGE_SHIFT;
   10662 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10663 
   10664 	/* Read reg */
   10665 	offset = reg & GS40G_OFFSET_MASK;
   10666 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10667 
   10668 	sc->phy.release(sc);
   10669 	return rv;
   10670 }
   10671 
   10672 /*
   10673  * wm_gmii_gs40g_writereg:	[mii interface function]
   10674  *
   10675  *	Write a PHY register on the I210 and I211.
   10676  * This could be handled by the PHY layer if we didn't have to lock the
    10677  * resource ...
   10678  */
   10679 static void
   10680 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10681 {
   10682 	struct wm_softc *sc = device_private(dev);
   10683 	int page, offset;
   10684 
   10685 	/* Acquire semaphore */
   10686 	if (sc->phy.acquire(sc)) {
   10687 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10688 		return;
   10689 	}
   10690 
   10691 	/* Page select */
   10692 	page = reg >> GS40G_PAGE_SHIFT;
   10693 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10694 
   10695 	/* Write reg */
   10696 	offset = reg & GS40G_OFFSET_MASK;
   10697 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10698 
   10699 	/* Release semaphore */
   10700 	sc->phy.release(sc);
   10701 }
   10702 
   10703 /*
   10704  * wm_gmii_statchg:	[mii interface function]
   10705  *
   10706  *	Callback from MII layer when media changes.
   10707  */
   10708 static void
   10709 wm_gmii_statchg(struct ifnet *ifp)
   10710 {
   10711 	struct wm_softc *sc = ifp->if_softc;
   10712 	struct mii_data *mii = &sc->sc_mii;
   10713 
   10714 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10715 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10716 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10717 
   10718 	/*
   10719 	 * Get flow control negotiation result.
   10720 	 */
   10721 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10722 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10723 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10724 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10725 	}
   10726 
   10727 	if (sc->sc_flowflags & IFM_FLOW) {
   10728 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10729 			sc->sc_ctrl |= CTRL_TFCE;
   10730 			sc->sc_fcrtl |= FCRTL_XONE;
   10731 		}
   10732 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10733 			sc->sc_ctrl |= CTRL_RFCE;
   10734 	}
   10735 
   10736 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10737 		DPRINTF(WM_DEBUG_LINK,
   10738 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10739 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10740 	} else {
   10741 		DPRINTF(WM_DEBUG_LINK,
   10742 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10743 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10744 	}
   10745 
   10746 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10747 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10748 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10749 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10750 	if (sc->sc_type == WM_T_80003) {
   10751 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10752 		case IFM_1000_T:
   10753 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10754 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10755 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10756 			break;
   10757 		default:
   10758 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10759 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10760 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10761 			break;
   10762 		}
   10763 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10764 	}
   10765 }
   10766 
    10767 /* Kumeran related (80003, ICH* and PCH*) */
   10768 
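/*
 * Note: Kumeran registers are reached through the single KUMCTRLSTA
 * register rather than over MDIO.  The register offset is shifted into
 * place and, for reads, KUMCTRLSTA_REN is set; after a 2us settling
 * delay the data is read back from KUMCTRLSTA (masked with
 * KUMCTRLSTA_MASK).  On the 80003 the MAC-CSR software/firmware
 * semaphore protects the access; elsewhere the generic PHY lock is
 * used.
 */
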
   10769 /*
   10770  * wm_kmrn_readreg:
   10771  *
    10772  *	Read a Kumeran register
   10773  */
   10774 static int
   10775 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10776 {
   10777 	int rv;
   10778 
   10779 	if (sc->sc_type == WM_T_80003)
   10780 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10781 	else
   10782 		rv = sc->phy.acquire(sc);
   10783 	if (rv != 0) {
   10784 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10785 		    __func__);
   10786 		return rv;
   10787 	}
   10788 
   10789 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10790 
   10791 	if (sc->sc_type == WM_T_80003)
   10792 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10793 	else
   10794 		sc->phy.release(sc);
   10795 
   10796 	return rv;
   10797 }
   10798 
   10799 static int
   10800 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10801 {
   10802 
   10803 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10804 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10805 	    KUMCTRLSTA_REN);
   10806 	CSR_WRITE_FLUSH(sc);
   10807 	delay(2);
   10808 
   10809 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10810 
   10811 	return 0;
   10812 }
   10813 
   10814 /*
   10815  * wm_kmrn_writereg:
   10816  *
    10817  *	Write a Kumeran register
   10818  */
   10819 static int
   10820 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10821 {
   10822 	int rv;
   10823 
   10824 	if (sc->sc_type == WM_T_80003)
   10825 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10826 	else
   10827 		rv = sc->phy.acquire(sc);
   10828 	if (rv != 0) {
   10829 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10830 		    __func__);
   10831 		return rv;
   10832 	}
   10833 
   10834 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10835 
   10836 	if (sc->sc_type == WM_T_80003)
   10837 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10838 	else
   10839 		sc->phy.release(sc);
   10840 
   10841 	return rv;
   10842 }
   10843 
   10844 static int
   10845 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10846 {
   10847 
   10848 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10849 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10850 
   10851 	return 0;
   10852 }
   10853 
   10854 /* SGMII related */
   10855 
   10856 /*
    10857  * wm_sgmii_uses_mdio:
   10858  *
   10859  * Check whether the transaction is to the internal PHY or the external
   10860  * MDIO interface. Return true if it's MDIO.
   10861  */
   10862 static bool
   10863 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10864 {
   10865 	uint32_t reg;
   10866 	bool ismdio = false;
   10867 
   10868 	switch (sc->sc_type) {
   10869 	case WM_T_82575:
   10870 	case WM_T_82576:
   10871 		reg = CSR_READ(sc, WMREG_MDIC);
   10872 		ismdio = ((reg & MDIC_DEST) != 0);
   10873 		break;
   10874 	case WM_T_82580:
   10875 	case WM_T_I350:
   10876 	case WM_T_I354:
   10877 	case WM_T_I210:
   10878 	case WM_T_I211:
   10879 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10880 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10881 		break;
   10882 	default:
   10883 		break;
   10884 	}
   10885 
   10886 	return ismdio;
   10887 }
   10888 
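/*
 * Note: when the SGMII PHY is not behind MDIO it is reached over I2C
 * through the I2CCMD register.  The I2C data is big-endian with respect
 * to the host, so both helpers below swap the two data bytes; a PHY
 * value of 0x1234, for example, travels through I2CCMD as 0x3412.
 */
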
   10889 /*
   10890  * wm_sgmii_readreg:	[mii interface function]
   10891  *
    10892  *	Read a PHY register on the SGMII.
    10893  * This could be handled by the PHY layer if we didn't have to lock the
    10894  * resource ...
   10895  */
   10896 static int
   10897 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10898 {
   10899 	struct wm_softc *sc = device_private(dev);
   10900 	uint32_t i2ccmd;
   10901 	int i, rv;
   10902 
   10903 	if (sc->phy.acquire(sc)) {
   10904 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10905 		return 0;
   10906 	}
   10907 
   10908 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10909 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10910 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10911 
   10912 	/* Poll the ready bit */
   10913 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10914 		delay(50);
   10915 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10916 		if (i2ccmd & I2CCMD_READY)
   10917 			break;
   10918 	}
   10919 	if ((i2ccmd & I2CCMD_READY) == 0)
   10920 		device_printf(dev, "I2CCMD Read did not complete\n");
   10921 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10922 		device_printf(dev, "I2CCMD Error bit set\n");
   10923 
   10924 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10925 
   10926 	sc->phy.release(sc);
   10927 	return rv;
   10928 }
   10929 
   10930 /*
   10931  * wm_sgmii_writereg:	[mii interface function]
   10932  *
   10933  *	Write a PHY register on the SGMII.
   10934  * This could be handled by the PHY layer if we didn't have to lock the
    10935  * resource ...
   10936  */
   10937 static void
   10938 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10939 {
   10940 	struct wm_softc *sc = device_private(dev);
   10941 	uint32_t i2ccmd;
   10942 	int i;
   10943 	int swapdata;
   10944 
   10945 	if (sc->phy.acquire(sc) != 0) {
   10946 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10947 		return;
   10948 	}
   10949 	/* Swap the data bytes for the I2C interface */
   10950 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10951 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10952 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   10953 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10954 
   10955 	/* Poll the ready bit */
   10956 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10957 		delay(50);
   10958 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10959 		if (i2ccmd & I2CCMD_READY)
   10960 			break;
   10961 	}
   10962 	if ((i2ccmd & I2CCMD_READY) == 0)
   10963 		device_printf(dev, "I2CCMD Write did not complete\n");
   10964 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10965 		device_printf(dev, "I2CCMD Error bit set\n");
   10966 
   10967 	sc->phy.release(sc);
   10968 }
   10969 
   10970 /* TBI related */
   10971 
   10972 /*
   10973  * wm_tbi_mediainit:
   10974  *
   10975  *	Initialize media for use on 1000BASE-X devices.
   10976  */
   10977 static void
   10978 wm_tbi_mediainit(struct wm_softc *sc)
   10979 {
   10980 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10981 	const char *sep = "";
   10982 
   10983 	if (sc->sc_type < WM_T_82543)
   10984 		sc->sc_tipg = TIPG_WM_DFLT;
   10985 	else
   10986 		sc->sc_tipg = TIPG_LG_DFLT;
   10987 
   10988 	sc->sc_tbi_serdes_anegticks = 5;
   10989 
   10990 	/* Initialize our media structures */
   10991 	sc->sc_mii.mii_ifp = ifp;
   10992 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10993 
   10994 	if ((sc->sc_type >= WM_T_82575)
   10995 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10996 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10997 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10998 	else
   10999 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11000 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11001 
   11002 	/*
   11003 	 * SWD Pins:
   11004 	 *
   11005 	 *	0 = Link LED (output)
   11006 	 *	1 = Loss Of Signal (input)
   11007 	 */
   11008 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11009 
   11010 	/* XXX Perhaps this is only for TBI */
   11011 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11012 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11013 
   11014 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11015 		sc->sc_ctrl &= ~CTRL_LRST;
   11016 
   11017 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11018 
   11019 #define	ADD(ss, mm, dd)							\
   11020 do {									\
   11021 	aprint_normal("%s%s", sep, ss);					\
   11022 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11023 	sep = ", ";							\
   11024 } while (/*CONSTCOND*/0)
   11025 
   11026 	aprint_normal_dev(sc->sc_dev, "");
   11027 
   11028 	if (sc->sc_type == WM_T_I354) {
   11029 		uint32_t status;
   11030 
   11031 		status = CSR_READ(sc, WMREG_STATUS);
   11032 		if (((status & STATUS_2P5_SKU) != 0)
   11033 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11034 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11035 		} else
   11036 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11037 	} else if (sc->sc_type == WM_T_82545) {
   11038 		/* Only 82545 is LX (XXX except SFP) */
   11039 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11040 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11041 	} else {
   11042 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11043 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11044 	}
   11045 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11046 	aprint_normal("\n");
   11047 
   11048 #undef ADD
   11049 
   11050 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11051 }
   11052 
   11053 /*
   11054  * wm_tbi_mediachange:	[ifmedia interface function]
   11055  *
   11056  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11057  */
   11058 static int
   11059 wm_tbi_mediachange(struct ifnet *ifp)
   11060 {
   11061 	struct wm_softc *sc = ifp->if_softc;
   11062 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11063 	uint32_t status;
   11064 	int i;
   11065 
   11066 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11067 		/* XXX need some work for >= 82571 and < 82575 */
   11068 		if (sc->sc_type < WM_T_82575)
   11069 			return 0;
   11070 	}
   11071 
   11072 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11073 	    || (sc->sc_type >= WM_T_82575))
   11074 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11075 
   11076 	sc->sc_ctrl &= ~CTRL_LRST;
   11077 	sc->sc_txcw = TXCW_ANE;
   11078 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11079 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11080 	else if (ife->ifm_media & IFM_FDX)
   11081 		sc->sc_txcw |= TXCW_FD;
   11082 	else
   11083 		sc->sc_txcw |= TXCW_HD;
   11084 
   11085 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11086 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11087 
   11088 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11089 		    device_xname(sc->sc_dev), sc->sc_txcw));
   11090 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11091 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11092 	CSR_WRITE_FLUSH(sc);
   11093 	delay(1000);
   11094 
   11095 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   11096 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   11097 
   11098 	/*
    11099 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    11100 	 * if the optics detect a signal; 0 if they don't.
   11101 	 */
   11102 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   11103 		/* Have signal; wait for the link to come up. */
   11104 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11105 			delay(10000);
   11106 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11107 				break;
   11108 		}
   11109 
   11110 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11111 			    device_xname(sc->sc_dev),i));
   11112 
   11113 		status = CSR_READ(sc, WMREG_STATUS);
   11114 		DPRINTF(WM_DEBUG_LINK,
   11115 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11116 			device_xname(sc->sc_dev),status, STATUS_LU));
   11117 		if (status & STATUS_LU) {
   11118 			/* Link is up. */
   11119 			DPRINTF(WM_DEBUG_LINK,
   11120 			    ("%s: LINK: set media -> link up %s\n",
   11121 			    device_xname(sc->sc_dev),
   11122 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   11123 
   11124 			/*
    11125 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    11126 			 * automatically, so we must re-read sc->sc_ctrl.
   11127 			 */
   11128 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11129 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11130 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11131 			if (status & STATUS_FD)
   11132 				sc->sc_tctl |=
   11133 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11134 			else
   11135 				sc->sc_tctl |=
   11136 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11137 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11138 				sc->sc_fcrtl |= FCRTL_XONE;
   11139 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11140 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11141 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   11142 				      sc->sc_fcrtl);
   11143 			sc->sc_tbi_linkup = 1;
   11144 		} else {
   11145 			if (i == WM_LINKUP_TIMEOUT)
   11146 				wm_check_for_link(sc);
   11147 			/* Link is down. */
   11148 			DPRINTF(WM_DEBUG_LINK,
   11149 			    ("%s: LINK: set media -> link down\n",
   11150 			    device_xname(sc->sc_dev)));
   11151 			sc->sc_tbi_linkup = 0;
   11152 		}
   11153 	} else {
   11154 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11155 		    device_xname(sc->sc_dev)));
   11156 		sc->sc_tbi_linkup = 0;
   11157 	}
   11158 
   11159 	wm_tbi_serdes_set_linkled(sc);
   11160 
   11161 	return 0;
   11162 }
   11163 
   11164 /*
   11165  * wm_tbi_mediastatus:	[ifmedia interface function]
   11166  *
   11167  *	Get the current interface media status on a 1000BASE-X device.
   11168  */
   11169 static void
   11170 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11171 {
   11172 	struct wm_softc *sc = ifp->if_softc;
   11173 	uint32_t ctrl, status;
   11174 
   11175 	ifmr->ifm_status = IFM_AVALID;
   11176 	ifmr->ifm_active = IFM_ETHER;
   11177 
   11178 	status = CSR_READ(sc, WMREG_STATUS);
   11179 	if ((status & STATUS_LU) == 0) {
   11180 		ifmr->ifm_active |= IFM_NONE;
   11181 		return;
   11182 	}
   11183 
   11184 	ifmr->ifm_status |= IFM_ACTIVE;
   11185 	/* Only 82545 is LX */
   11186 	if (sc->sc_type == WM_T_82545)
   11187 		ifmr->ifm_active |= IFM_1000_LX;
   11188 	else
   11189 		ifmr->ifm_active |= IFM_1000_SX;
   11190 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11191 		ifmr->ifm_active |= IFM_FDX;
   11192 	else
   11193 		ifmr->ifm_active |= IFM_HDX;
   11194 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11195 	if (ctrl & CTRL_RFCE)
   11196 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11197 	if (ctrl & CTRL_TFCE)
   11198 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11199 }
   11200 
   11201 /* XXX TBI only */
   11202 static int
   11203 wm_check_for_link(struct wm_softc *sc)
   11204 {
   11205 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11206 	uint32_t rxcw;
   11207 	uint32_t ctrl;
   11208 	uint32_t status;
   11209 	uint32_t sig;
   11210 
   11211 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11212 		/* XXX need some work for >= 82571 */
   11213 		if (sc->sc_type >= WM_T_82571) {
   11214 			sc->sc_tbi_linkup = 1;
   11215 			return 0;
   11216 		}
   11217 	}
   11218 
   11219 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11220 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11221 	status = CSR_READ(sc, WMREG_STATUS);
   11222 
   11223 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11224 
   11225 	DPRINTF(WM_DEBUG_LINK,
   11226 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11227 		device_xname(sc->sc_dev), __func__,
   11228 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11229 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11230 
   11231 	/*
   11232 	 * SWDPIN   LU RXCW
   11233 	 *      0    0    0
   11234 	 *      0    0    1	(should not happen)
   11235 	 *      0    1    0	(should not happen)
   11236 	 *      0    1    1	(should not happen)
   11237 	 *      1    0    0	Disable autonego and force linkup
   11238 	 *      1    0    1	got /C/ but not linkup yet
   11239 	 *      1    1    0	(linkup)
   11240 	 *      1    1    1	If IFM_AUTO, back to autonego
   11241 	 *
   11242 	 */
   11243 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11244 	    && ((status & STATUS_LU) == 0)
   11245 	    && ((rxcw & RXCW_C) == 0)) {
   11246 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11247 			__func__));
   11248 		sc->sc_tbi_linkup = 0;
   11249 		/* Disable auto-negotiation in the TXCW register */
   11250 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11251 
   11252 		/*
   11253 		 * Force link-up and also force full-duplex.
   11254 		 *
    11255 		 * NOTE: The hardware updates TFCE and RFCE in CTRL
    11256 		 * automatically, so refresh sc->sc_ctrl from the register.
   11257 		 */
   11258 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11259 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11260 	} else if (((status & STATUS_LU) != 0)
   11261 	    && ((rxcw & RXCW_C) != 0)
   11262 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11263 		sc->sc_tbi_linkup = 1;
   11264 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11265 			__func__));
   11266 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11267 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11268 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11269 	    && ((rxcw & RXCW_C) != 0)) {
   11270 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11271 	} else {
   11272 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11273 			status));
   11274 	}
   11275 
   11276 	return 0;
   11277 }
   11278 
   11279 /*
   11280  * wm_tbi_tick:
   11281  *
   11282  *	Check the link on TBI devices.
   11283  *	This function acts as mii_tick().
   11284  */
   11285 static void
   11286 wm_tbi_tick(struct wm_softc *sc)
   11287 {
   11288 	struct mii_data *mii = &sc->sc_mii;
   11289 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11290 	uint32_t status;
   11291 
   11292 	KASSERT(WM_CORE_LOCKED(sc));
   11293 
   11294 	status = CSR_READ(sc, WMREG_STATUS);
   11295 
   11296 	/* XXX is this needed? */
   11297 	(void)CSR_READ(sc, WMREG_RXCW);
   11298 	(void)CSR_READ(sc, WMREG_CTRL);
   11299 
   11300 	/* set link status */
   11301 	if ((status & STATUS_LU) == 0) {
   11302 		DPRINTF(WM_DEBUG_LINK,
   11303 		    ("%s: LINK: checklink -> down\n",
   11304 			device_xname(sc->sc_dev)));
   11305 		sc->sc_tbi_linkup = 0;
   11306 	} else if (sc->sc_tbi_linkup == 0) {
   11307 		DPRINTF(WM_DEBUG_LINK,
   11308 		    ("%s: LINK: checklink -> up %s\n",
   11309 			device_xname(sc->sc_dev),
   11310 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11311 		sc->sc_tbi_linkup = 1;
   11312 		sc->sc_tbi_serdes_ticks = 0;
   11313 	}
   11314 
   11315 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11316 		goto setled;
   11317 
   11318 	if ((status & STATUS_LU) == 0) {
   11319 		sc->sc_tbi_linkup = 0;
   11320 		/* If the timer expired, retry autonegotiation */
   11321 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11322 		    && (++sc->sc_tbi_serdes_ticks
   11323 			>= sc->sc_tbi_serdes_anegticks)) {
   11324 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11325 			sc->sc_tbi_serdes_ticks = 0;
   11326 			/*
   11327 			 * Reset the link, and let autonegotiation do
   11328 			 * its thing
   11329 			 */
   11330 			sc->sc_ctrl |= CTRL_LRST;
   11331 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11332 			CSR_WRITE_FLUSH(sc);
   11333 			delay(1000);
   11334 			sc->sc_ctrl &= ~CTRL_LRST;
   11335 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11336 			CSR_WRITE_FLUSH(sc);
   11337 			delay(1000);
   11338 			CSR_WRITE(sc, WMREG_TXCW,
   11339 			    sc->sc_txcw & ~TXCW_ANE);
   11340 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11341 		}
   11342 	}
   11343 
   11344 setled:
   11345 	wm_tbi_serdes_set_linkled(sc);
   11346 }
   11347 
   11348 /* SERDES related */
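/*
 * wm_serdes_power_up_link_82575:
 *
 *	Power up the SerDes/SGMII link on 82575-class devices: enable the
 *	PCS and drive SWDPIN(3) in CTRL_EXT low, which presumably ungates
 *	power to the external PHY/SFP circuitry on these boards.
 */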
   11349 static void
   11350 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11351 {
   11352 	uint32_t reg;
   11353 
   11354 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11355 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11356 		return;
   11357 
   11358 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11359 	reg |= PCS_CFG_PCS_EN;
   11360 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11361 
   11362 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11363 	reg &= ~CTRL_EXT_SWDPIN(3);
   11364 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11365 	CSR_WRITE_FLUSH(sc);
   11366 }
   11367 
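/*
 * wm_serdes_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a SerDes device.  Power up
 *	the link, then either restart PCS autonegotiation or force
 *	1000 Mb/s full-duplex, depending on the link mode.
 */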
   11368 static int
   11369 wm_serdes_mediachange(struct ifnet *ifp)
   11370 {
   11371 	struct wm_softc *sc = ifp->if_softc;
   11372 	bool pcs_autoneg = true; /* XXX */
   11373 	uint32_t ctrl_ext, pcs_lctl, reg;
   11374 
   11375 	/* XXX Currently, this function is not called on 8257[12] */
   11376 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11377 	    || (sc->sc_type >= WM_T_82575))
   11378 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11379 
   11380 	wm_serdes_power_up_link_82575(sc);
   11381 
   11382 	sc->sc_ctrl |= CTRL_SLU;
   11383 
   11384 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11385 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11386 
   11387 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11388 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11389 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11390 	case CTRL_EXT_LINK_MODE_SGMII:
   11391 		pcs_autoneg = true;
   11392 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11393 		break;
   11394 	case CTRL_EXT_LINK_MODE_1000KX:
   11395 		pcs_autoneg = false;
   11396 		/* FALLTHROUGH */
   11397 	default:
   11398 		if ((sc->sc_type == WM_T_82575)
   11399 		    || (sc->sc_type == WM_T_82576)) {
   11400 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11401 				pcs_autoneg = false;
   11402 		}
   11403 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11404 		    | CTRL_FRCFDX;
   11405 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11406 	}
   11407 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11408 
   11409 	if (pcs_autoneg) {
   11410 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11411 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11412 
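		/* Advertise both symmetric and asymmetric pause */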
   11413 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11414 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11415 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11416 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11417 	} else
   11418 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11419 
   11420 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11421 
   11422 
   11423 	return 0;
   11424 }
   11425 
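/*
 * wm_serdes_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a SerDes device from
 *	the PCS link status register, including the negotiated flow
 *	control when IFM_AUTO is selected.
 */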
   11426 static void
   11427 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11428 {
   11429 	struct wm_softc *sc = ifp->if_softc;
   11430 	struct mii_data *mii = &sc->sc_mii;
   11431 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11432 	uint32_t pcs_adv, pcs_lpab, reg;
   11433 
   11434 	ifmr->ifm_status = IFM_AVALID;
   11435 	ifmr->ifm_active = IFM_ETHER;
   11436 
   11437 	/* Check PCS */
   11438 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11439 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11440 		ifmr->ifm_active |= IFM_NONE;
   11441 		sc->sc_tbi_linkup = 0;
   11442 		goto setled;
   11443 	}
   11444 
   11445 	sc->sc_tbi_linkup = 1;
   11446 	ifmr->ifm_status |= IFM_ACTIVE;
   11447 	if (sc->sc_type == WM_T_I354) {
   11448 		uint32_t status;
   11449 
   11450 		status = CSR_READ(sc, WMREG_STATUS);
   11451 		if (((status & STATUS_2P5_SKU) != 0)
   11452 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11453 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11454 		} else
   11455 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11456 	} else {
   11457 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11458 		case PCS_LSTS_SPEED_10:
   11459 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11460 			break;
   11461 		case PCS_LSTS_SPEED_100:
   11462 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11463 			break;
   11464 		case PCS_LSTS_SPEED_1000:
   11465 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11466 			break;
   11467 		default:
   11468 			device_printf(sc->sc_dev, "Unknown speed\n");
   11469 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11470 			break;
   11471 		}
   11472 	}
   11473 	if ((reg & PCS_LSTS_FDX) != 0)
   11474 		ifmr->ifm_active |= IFM_FDX;
   11475 	else
   11476 		ifmr->ifm_active |= IFM_HDX;
   11477 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11478 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11479 		/* Check flow */
   11480 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11481 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11482 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11483 			goto setled;
   11484 		}
   11485 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11486 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11487 		DPRINTF(WM_DEBUG_LINK,
   11488 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
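		/*
		 * Resolve flow control as in IEEE 802.3 Annex 28B: if both
		 * sides advertise symmetric pause, enable it in both
		 * directions; the asymmetric combinations below yield
		 * TX-only or RX-only pause.
		 */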
   11489 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11490 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11491 			mii->mii_media_active |= IFM_FLOW
   11492 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11493 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11494 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11495 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11496 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11497 			mii->mii_media_active |= IFM_FLOW
   11498 			    | IFM_ETH_TXPAUSE;
   11499 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11500 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11501 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11502 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11503 			mii->mii_media_active |= IFM_FLOW
   11504 			    | IFM_ETH_RXPAUSE;
   11505 		}
   11506 	}
   11507 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11508 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11509 setled:
   11510 	wm_tbi_serdes_set_linkled(sc);
   11511 }
   11512 
   11513 /*
   11514  * wm_serdes_tick:
   11515  *
   11516  *	Check the link on serdes devices.
   11517  */
   11518 static void
   11519 wm_serdes_tick(struct wm_softc *sc)
   11520 {
   11521 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11522 	struct mii_data *mii = &sc->sc_mii;
   11523 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11524 	uint32_t reg;
   11525 
   11526 	KASSERT(WM_CORE_LOCKED(sc));
   11527 
   11528 	mii->mii_media_status = IFM_AVALID;
   11529 	mii->mii_media_active = IFM_ETHER;
   11530 
   11531 	/* Check PCS */
   11532 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11533 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11534 		mii->mii_media_status |= IFM_ACTIVE;
   11535 		sc->sc_tbi_linkup = 1;
   11536 		sc->sc_tbi_serdes_ticks = 0;
   11537 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11538 		if ((reg & PCS_LSTS_FDX) != 0)
   11539 			mii->mii_media_active |= IFM_FDX;
   11540 		else
   11541 			mii->mii_media_active |= IFM_HDX;
   11542 	} else {
   11543 		mii->mii_media_status |= IFM_NONE;
   11544 		sc->sc_tbi_linkup = 0;
   11545 		/* If the timer expired, retry autonegotiation */
   11546 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11547 		    && (++sc->sc_tbi_serdes_ticks
   11548 			>= sc->sc_tbi_serdes_anegticks)) {
   11549 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11550 			sc->sc_tbi_serdes_ticks = 0;
   11551 			/* XXX */
   11552 			wm_serdes_mediachange(ifp);
   11553 		}
   11554 	}
   11555 
   11556 	wm_tbi_serdes_set_linkled(sc);
   11557 }
   11558 
   11559 /* SFP related */
   11560 
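/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's ID EEPROM through the I2CCMD
 *	register: start a read of the given offset and poll for the ready
 *	bit, failing on timeout or an I2C error.
 */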
   11561 static int
   11562 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11563 {
   11564 	uint32_t i2ccmd;
   11565 	int i;
   11566 
   11567 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11568 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11569 
   11570 	/* Poll the ready bit */
   11571 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11572 		delay(50);
   11573 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11574 		if (i2ccmd & I2CCMD_READY)
   11575 			break;
   11576 	}
   11577 	if ((i2ccmd & I2CCMD_READY) == 0)
   11578 		return -1;
   11579 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11580 		return -1;
   11581 
   11582 	*data = i2ccmd & 0x00ff;
   11583 
   11584 	return 0;
   11585 }
   11586 
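/*
 * wm_sfp_get_media_type:
 *
 *	Identify the media type from the SFP module's ID EEPROM: read the
 *	identifier byte, then the Ethernet compliance-code flags, mapping
 *	1000BASE-SX/LX to SerDes, 1000BASE-T to SGMII copper and
 *	100BASE-FX to SerDes with the SGMII flag set.
 */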
   11587 static uint32_t
   11588 wm_sfp_get_media_type(struct wm_softc *sc)
   11589 {
   11590 	uint32_t ctrl_ext;
   11591 	uint8_t val = 0;
   11592 	int timeout = 3;
   11593 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11594 	int rv = -1;
   11595 
   11596 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11597 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11598 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11599 	CSR_WRITE_FLUSH(sc);
   11600 
   11601 	/* Read SFP module data */
   11602 	while (timeout) {
   11603 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11604 		if (rv == 0)
   11605 			break;
   11606 		delay(100*1000); /* XXX too big */
   11607 		timeout--;
   11608 	}
   11609 	if (rv != 0)
   11610 		goto out;
   11611 	switch (val) {
   11612 	case SFF_SFP_ID_SFF:
   11613 		aprint_normal_dev(sc->sc_dev,
   11614 		    "Module/Connector soldered to board\n");
   11615 		break;
   11616 	case SFF_SFP_ID_SFP:
   11617 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11618 		break;
   11619 	case SFF_SFP_ID_UNKNOWN:
   11620 		goto out;
   11621 	default:
   11622 		break;
   11623 	}
   11624 
   11625 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11626 	if (rv != 0) {
   11627 		goto out;
   11628 	}
   11629 
   11630 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11631 		mediatype = WM_MEDIATYPE_SERDES;
    11632 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
    11633 		sc->sc_flags |= WM_F_SGMII;
    11634 		mediatype = WM_MEDIATYPE_COPPER;
    11635 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11636 		sc->sc_flags |= WM_F_SGMII;
   11637 		mediatype = WM_MEDIATYPE_SERDES;
   11638 	}
   11639 
   11640 out:
   11641 	/* Restore I2C interface setting */
   11642 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11643 
   11644 	return mediatype;
   11645 }
   11646 
   11647 /*
   11648  * NVM related.
   11649  * Microwire, SPI (w/wo EERD) and Flash.
   11650  */
   11651 
   11652 /* Both spi and uwire */
   11653 
   11654 /*
   11655  * wm_eeprom_sendbits:
   11656  *
   11657  *	Send a series of bits to the EEPROM.
   11658  */
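/*
 * The EEPROM is bit-banged through the EECD register: each bit is
 * placed on DI and the SK clock is then pulsed high and low, so the
 * device samples DI on the rising edge of SK.
 */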
   11659 static void
   11660 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11661 {
   11662 	uint32_t reg;
   11663 	int x;
   11664 
   11665 	reg = CSR_READ(sc, WMREG_EECD);
   11666 
   11667 	for (x = nbits; x > 0; x--) {
   11668 		if (bits & (1U << (x - 1)))
   11669 			reg |= EECD_DI;
   11670 		else
   11671 			reg &= ~EECD_DI;
   11672 		CSR_WRITE(sc, WMREG_EECD, reg);
   11673 		CSR_WRITE_FLUSH(sc);
   11674 		delay(2);
   11675 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11676 		CSR_WRITE_FLUSH(sc);
   11677 		delay(2);
   11678 		CSR_WRITE(sc, WMREG_EECD, reg);
   11679 		CSR_WRITE_FLUSH(sc);
   11680 		delay(2);
   11681 	}
   11682 }
   11683 
   11684 /*
   11685  * wm_eeprom_recvbits:
   11686  *
   11687  *	Receive a series of bits from the EEPROM.
   11688  */
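/*
 * The received bits arrive most significant bit first: SK is raised,
 * DO is sampled while SK is high, and SK is lowered again for each bit.
 */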
   11689 static void
   11690 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11691 {
   11692 	uint32_t reg, val;
   11693 	int x;
   11694 
   11695 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11696 
   11697 	val = 0;
   11698 	for (x = nbits; x > 0; x--) {
   11699 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11700 		CSR_WRITE_FLUSH(sc);
   11701 		delay(2);
   11702 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11703 			val |= (1U << (x - 1));
   11704 		CSR_WRITE(sc, WMREG_EECD, reg);
   11705 		CSR_WRITE_FLUSH(sc);
   11706 		delay(2);
   11707 	}
   11708 	*valp = val;
   11709 }
   11710 
   11711 /* Microwire */
   11712 
   11713 /*
   11714  * wm_nvm_read_uwire:
   11715  *
   11716  *	Read a word from the EEPROM using the MicroWire protocol.
   11717  */
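/*
 * A Microwire read clocks out the 3-bit READ opcode (UWIRE_OPC_READ,
 * the sequence 110: a start bit followed by the read opcode), then the
 * word address, after which the device shifts back 16 data bits.
 */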
   11718 static int
   11719 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11720 {
   11721 	uint32_t reg, val;
   11722 	int i;
   11723 
   11724 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11725 		device_xname(sc->sc_dev), __func__));
   11726 
   11727 	if (sc->nvm.acquire(sc) != 0)
   11728 		return -1;
   11729 
   11730 	for (i = 0; i < wordcnt; i++) {
   11731 		/* Clear SK and DI. */
   11732 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11733 		CSR_WRITE(sc, WMREG_EECD, reg);
   11734 
   11735 		/*
   11736 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11737 		 * and Xen.
   11738 		 *
   11739 		 * We use this workaround only for 82540 because qemu's
    11740 		 * e1000 acts as an 82540.
   11741 		 */
   11742 		if (sc->sc_type == WM_T_82540) {
   11743 			reg |= EECD_SK;
   11744 			CSR_WRITE(sc, WMREG_EECD, reg);
   11745 			reg &= ~EECD_SK;
   11746 			CSR_WRITE(sc, WMREG_EECD, reg);
   11747 			CSR_WRITE_FLUSH(sc);
   11748 			delay(2);
   11749 		}
   11750 		/* XXX: end of workaround */
   11751 
   11752 		/* Set CHIP SELECT. */
   11753 		reg |= EECD_CS;
   11754 		CSR_WRITE(sc, WMREG_EECD, reg);
   11755 		CSR_WRITE_FLUSH(sc);
   11756 		delay(2);
   11757 
   11758 		/* Shift in the READ command. */
   11759 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11760 
   11761 		/* Shift in address. */
   11762 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11763 
   11764 		/* Shift out the data. */
   11765 		wm_eeprom_recvbits(sc, &val, 16);
   11766 		data[i] = val & 0xffff;
   11767 
   11768 		/* Clear CHIP SELECT. */
   11769 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11770 		CSR_WRITE(sc, WMREG_EECD, reg);
   11771 		CSR_WRITE_FLUSH(sc);
   11772 		delay(2);
   11773 	}
   11774 
   11775 	sc->nvm.release(sc);
   11776 	return 0;
   11777 }
   11778 
   11779 /* SPI */
   11780 
   11781 /*
   11782  * Set SPI and FLASH related information from the EECD register.
   11783  * For 82541 and 82547, the word size is taken from EEPROM.
   11784  */
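/*
 * A minimal worked example, assuming NVM_WORD_SIZE_BASE_SHIFT is 6:
 * an EECD size field of 2 on an 82571 yields size = 2 + 6 = 8, so
 * sc_nvm_wordsize = 1 << 8 = 256 words.
 */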
   11785 static int
   11786 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11787 {
   11788 	int size;
   11789 	uint32_t reg;
   11790 	uint16_t data;
   11791 
   11792 	reg = CSR_READ(sc, WMREG_EECD);
   11793 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11794 
   11795 	/* Read the size of NVM from EECD by default */
   11796 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11797 	switch (sc->sc_type) {
   11798 	case WM_T_82541:
   11799 	case WM_T_82541_2:
   11800 	case WM_T_82547:
   11801 	case WM_T_82547_2:
   11802 		/* Set dummy value to access EEPROM */
   11803 		sc->sc_nvm_wordsize = 64;
   11804 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11805 			aprint_error_dev(sc->sc_dev,
   11806 			    "%s: failed to read EEPROM size\n", __func__);
   11807 		}
   11808 		reg = data;
   11809 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11810 		if (size == 0)
   11811 			size = 6; /* 64 word size */
   11812 		else
   11813 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11814 		break;
   11815 	case WM_T_80003:
   11816 	case WM_T_82571:
   11817 	case WM_T_82572:
   11818 	case WM_T_82573: /* SPI case */
   11819 	case WM_T_82574: /* SPI case */
   11820 	case WM_T_82583: /* SPI case */
   11821 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11822 		if (size > 14)
   11823 			size = 14;
   11824 		break;
   11825 	case WM_T_82575:
   11826 	case WM_T_82576:
   11827 	case WM_T_82580:
   11828 	case WM_T_I350:
   11829 	case WM_T_I354:
   11830 	case WM_T_I210:
   11831 	case WM_T_I211:
   11832 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11833 		if (size > 15)
   11834 			size = 15;
   11835 		break;
   11836 	default:
   11837 		aprint_error_dev(sc->sc_dev,
   11838 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    11839 		return -1;
   11841 	}
   11842 
   11843 	sc->sc_nvm_wordsize = 1 << size;
   11844 
   11845 	return 0;
   11846 }
   11847 
   11848 /*
   11849  * wm_nvm_ready_spi:
   11850  *
   11851  *	Wait for a SPI EEPROM to be ready for commands.
   11852  */
   11853 static int
   11854 wm_nvm_ready_spi(struct wm_softc *sc)
   11855 {
   11856 	uint32_t val;
   11857 	int usec;
   11858 
   11859 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11860 		device_xname(sc->sc_dev), __func__));
   11861 
   11862 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11863 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11864 		wm_eeprom_recvbits(sc, &val, 8);
   11865 		if ((val & SPI_SR_RDY) == 0)
   11866 			break;
   11867 	}
   11868 	if (usec >= SPI_MAX_RETRIES) {
    11869 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11870 		return -1;
   11871 	}
   11872 	return 0;
   11873 }
   11874 
   11875 /*
   11876  * wm_nvm_read_spi:
   11877  *
    11878  *	Read a word from the EEPROM using the SPI protocol.
   11879  */
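/*
 * The address is sent as a byte offset (word << 1).  On devices with
 * 8-bit addressing, reads beyond byte 0xff (word 128 and up) set the
 * A8 bit in the opcode to provide the ninth address bit.
 */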
   11880 static int
   11881 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11882 {
   11883 	uint32_t reg, val;
   11884 	int i;
   11885 	uint8_t opc;
   11886 	int rv = 0;
   11887 
   11888 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11889 		device_xname(sc->sc_dev), __func__));
   11890 
   11891 	if (sc->nvm.acquire(sc) != 0)
   11892 		return -1;
   11893 
   11894 	/* Clear SK and CS. */
   11895 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11896 	CSR_WRITE(sc, WMREG_EECD, reg);
   11897 	CSR_WRITE_FLUSH(sc);
   11898 	delay(2);
   11899 
   11900 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11901 		goto out;
   11902 
   11903 	/* Toggle CS to flush commands. */
   11904 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11905 	CSR_WRITE_FLUSH(sc);
   11906 	delay(2);
   11907 	CSR_WRITE(sc, WMREG_EECD, reg);
   11908 	CSR_WRITE_FLUSH(sc);
   11909 	delay(2);
   11910 
   11911 	opc = SPI_OPC_READ;
   11912 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11913 		opc |= SPI_OPC_A8;
   11914 
   11915 	wm_eeprom_sendbits(sc, opc, 8);
   11916 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11917 
   11918 	for (i = 0; i < wordcnt; i++) {
   11919 		wm_eeprom_recvbits(sc, &val, 16);
   11920 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11921 	}
   11922 
   11923 	/* Raise CS and clear SK. */
   11924 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11925 	CSR_WRITE(sc, WMREG_EECD, reg);
   11926 	CSR_WRITE_FLUSH(sc);
   11927 	delay(2);
   11928 
   11929 out:
   11930 	sc->nvm.release(sc);
   11931 	return rv;
   11932 }
   11933 
    11934 /* Reading via the EERD register */
   11935 
   11936 static int
   11937 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11938 {
   11939 	uint32_t attempts = 100000;
   11940 	uint32_t i, reg = 0;
   11941 	int32_t done = -1;
   11942 
   11943 	for (i = 0; i < attempts; i++) {
   11944 		reg = CSR_READ(sc, rw);
   11945 
   11946 		if (reg & EERD_DONE) {
   11947 			done = 0;
   11948 			break;
   11949 		}
   11950 		delay(5);
   11951 	}
   11952 
   11953 	return done;
   11954 }
   11955 
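/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the NVM using the EERD register: write the word
 *	address shifted into the address field with the START bit set,
 *	poll for DONE, then take the result from the data field.
 */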
   11956 static int
   11957 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   11958 {
   11959 	int i, eerd = 0;
   11960 	int rv = 0;
   11961 
   11962 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11963 		device_xname(sc->sc_dev), __func__));
   11964 
   11965 	if (sc->nvm.acquire(sc) != 0)
   11966 		return -1;
   11967 
   11968 	for (i = 0; i < wordcnt; i++) {
   11969 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11970 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11971 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11972 		if (rv != 0) {
   11973 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    11974 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   11975 			break;
   11976 		}
   11977 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11978 	}
   11979 
   11980 	sc->nvm.release(sc);
   11981 	return rv;
   11982 }
   11983 
   11984 /* Flash */
   11985 
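/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Figure out which of the two NVM banks in the ICH/PCH flash is in
 *	use.  Each bank carries a signature byte at ICH_NVM_SIG_WORD that
 *	must match ICH_NVM_SIG_VALUE under ICH_NVM_VALID_SIG_MASK; on
 *	ICH8/ICH9 the EECD SEC1VAL bits may report the bank directly.
 */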
   11986 static int
   11987 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11988 {
   11989 	uint32_t eecd;
   11990 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11991 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11992 	uint32_t nvm_dword = 0;
   11993 	uint8_t sig_byte = 0;
    11994 	int rv;
   11995 
   11996 	switch (sc->sc_type) {
   11997 	case WM_T_PCH_SPT:
   11998 	case WM_T_PCH_CNP:
   11999 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12000 		act_offset = ICH_NVM_SIG_WORD * 2;
   12001 
   12002 		/* set bank to 0 in case flash read fails. */
   12003 		*bank = 0;
   12004 
   12005 		/* Check bank 0 */
   12006 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12007 		if (rv != 0)
   12008 			return rv;
   12009 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12010 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12011 			*bank = 0;
   12012 			return 0;
   12013 		}
   12014 
   12015 		/* Check bank 1 */
    12016 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12017 		    &nvm_dword);
		if (rv != 0)
			return rv;
   12018 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12019 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12020 			*bank = 1;
   12021 			return 0;
   12022 		}
   12023 		aprint_error_dev(sc->sc_dev,
   12024 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12025 		return -1;
   12026 	case WM_T_ICH8:
   12027 	case WM_T_ICH9:
   12028 		eecd = CSR_READ(sc, WMREG_EECD);
   12029 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12030 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12031 			return 0;
   12032 		}
   12033 		/* FALLTHROUGH */
   12034 	default:
   12035 		/* Default to 0 */
   12036 		*bank = 0;
   12037 
   12038 		/* Check bank 0 */
   12039 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12040 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12041 			*bank = 0;
   12042 			return 0;
   12043 		}
   12044 
   12045 		/* Check bank 1 */
   12046 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12047 		    &sig_byte);
   12048 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12049 			*bank = 1;
   12050 			return 0;
   12051 		}
   12052 	}
   12053 
   12054 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12055 		device_xname(sc->sc_dev)));
   12056 	return -1;
   12057 }
   12058 
   12059 /******************************************************************************
   12060  * This function does initial flash setup so that a new read/write/erase cycle
   12061  * can be started.
   12062  *
   12063  * sc - The pointer to the hw structure
   12064  ****************************************************************************/
   12065 static int32_t
   12066 wm_ich8_cycle_init(struct wm_softc *sc)
   12067 {
   12068 	uint16_t hsfsts;
   12069 	int32_t error = 1;
   12070 	int32_t i     = 0;
   12071 
   12072 	if (sc->sc_type >= WM_T_PCH_SPT)
   12073 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12074 	else
   12075 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12076 
    12077 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   12078 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   12079 		return error;
   12080 	}
   12081 
    12082 	/* Clear FCERR in HW status by writing a 1 */
    12083 	/* Clear DAEL in HW status by writing a 1 */
   12084 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12085 
   12086 	if (sc->sc_type >= WM_T_PCH_SPT)
   12087 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12088 	else
   12089 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12090 
    12091 	/*
    12092 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12093 	 * check against in order to start a new cycle, or the FDONE bit
    12094 	 * should be changed in the hardware so that it is 1 after a
    12095 	 * hardware reset, which could then indicate whether a cycle is
    12096 	 * in progress or has completed.  We should also have a software
    12097 	 * semaphore mechanism guarding FDONE or the cycle-in-progress
    12098 	 * bit so that accesses to those bits by two threads are
    12099 	 * serialized and they don't start a cycle at the same time.
    12100 	 */
   12101 
   12102 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12103 		/*
   12104 		 * There is no cycle running at present, so we can start a
   12105 		 * cycle
   12106 		 */
   12107 
   12108 		/* Begin by setting Flash Cycle Done. */
   12109 		hsfsts |= HSFSTS_DONE;
   12110 		if (sc->sc_type >= WM_T_PCH_SPT)
   12111 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12112 			    hsfsts & 0xffffUL);
   12113 		else
   12114 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12115 		error = 0;
   12116 	} else {
   12117 		/*
    12118 		 * Otherwise, poll for some time so the current cycle has a
   12119 		 * chance to end before giving up.
   12120 		 */
   12121 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12122 			if (sc->sc_type >= WM_T_PCH_SPT)
   12123 				hsfsts = ICH8_FLASH_READ32(sc,
   12124 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12125 			else
   12126 				hsfsts = ICH8_FLASH_READ16(sc,
   12127 				    ICH_FLASH_HSFSTS);
   12128 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12129 				error = 0;
   12130 				break;
   12131 			}
   12132 			delay(1);
   12133 		}
   12134 		if (error == 0) {
   12135 			/*
    12136 			 * The previous cycle ended before the timeout, so
    12137 			 * now set the Flash Cycle Done bit.
   12138 			 */
   12139 			hsfsts |= HSFSTS_DONE;
   12140 			if (sc->sc_type >= WM_T_PCH_SPT)
   12141 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12142 				    hsfsts & 0xffffUL);
   12143 			else
   12144 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12145 				    hsfsts);
   12146 		}
   12147 	}
   12148 	return error;
   12149 }
   12150 
   12151 /******************************************************************************
   12152  * This function starts a flash cycle and waits for its completion
   12153  *
   12154  * sc - The pointer to the hw structure
   12155  ****************************************************************************/
   12156 static int32_t
   12157 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12158 {
   12159 	uint16_t hsflctl;
   12160 	uint16_t hsfsts;
   12161 	int32_t error = 1;
   12162 	uint32_t i = 0;
   12163 
   12164 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12165 	if (sc->sc_type >= WM_T_PCH_SPT)
   12166 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12167 	else
   12168 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12169 	hsflctl |= HSFCTL_GO;
   12170 	if (sc->sc_type >= WM_T_PCH_SPT)
   12171 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12172 		    (uint32_t)hsflctl << 16);
   12173 	else
   12174 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12175 
   12176 	/* Wait till FDONE bit is set to 1 */
   12177 	do {
   12178 		if (sc->sc_type >= WM_T_PCH_SPT)
   12179 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12180 			    & 0xffffUL;
   12181 		else
   12182 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12183 		if (hsfsts & HSFSTS_DONE)
   12184 			break;
   12185 		delay(1);
   12186 		i++;
   12187 	} while (i < timeout);
    12188 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12189 		error = 0;
   12190 
   12191 	return error;
   12192 }
   12193 
   12194 /******************************************************************************
   12195  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12196  *
   12197  * sc - The pointer to the hw structure
   12198  * index - The index of the byte or word to read.
   12199  * size - Size of data to read, 1=byte 2=word, 4=dword
   12200  * data - Pointer to the word to store the value read.
   12201  *****************************************************************************/
   12202 static int32_t
   12203 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12204     uint32_t size, uint32_t *data)
   12205 {
   12206 	uint16_t hsfsts;
   12207 	uint16_t hsflctl;
   12208 	uint32_t flash_linear_address;
   12209 	uint32_t flash_data = 0;
   12210 	int32_t error = 1;
   12211 	int32_t count = 0;
   12212 
    12213 	if (size < 1 || size > 4 || data == NULL ||
   12214 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12215 		return error;
   12216 
   12217 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12218 	    sc->sc_ich8_flash_base;
   12219 
   12220 	do {
   12221 		delay(1);
   12222 		/* Steps */
   12223 		error = wm_ich8_cycle_init(sc);
   12224 		if (error)
   12225 			break;
   12226 
   12227 		if (sc->sc_type >= WM_T_PCH_SPT)
   12228 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12229 			    >> 16;
   12230 		else
   12231 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12232 		/* BCOUNT is the byte count minus one (0..3) */
   12233 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12234 		    & HSFCTL_BCOUNT_MASK;
   12235 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12236 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12237 			/*
    12238 			 * On SPT, this register is in LAN memory space, not
    12239 			 * flash; therefore only 32-bit access is supported.
   12240 			 */
   12241 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12242 			    (uint32_t)hsflctl << 16);
   12243 		} else
   12244 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12245 
   12246 		/*
   12247 		 * Write the last 24 bits of index into Flash Linear address
   12248 		 * field in Flash Address
   12249 		 */
    12250 		/* TODO: check the index against the size of the flash */
   12251 
   12252 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12253 
   12254 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12255 
   12256 		/*
    12257 		 * If FCERR is set, clear it and retry the whole sequence a
    12258 		 * few more times; otherwise read the result from Flash
    12259 		 * Data0, which returns the data least significant byte
    12260 		 * first.
   12261 		 */
   12262 		if (error == 0) {
   12263 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12264 			if (size == 1)
   12265 				*data = (uint8_t)(flash_data & 0x000000FF);
   12266 			else if (size == 2)
   12267 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12268 			else if (size == 4)
   12269 				*data = (uint32_t)flash_data;
   12270 			break;
   12271 		} else {
   12272 			/*
   12273 			 * If we've gotten here, then things are probably
   12274 			 * completely hosed, but if the error condition is
   12275 			 * detected, it won't hurt to give it another try...
   12276 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12277 			 */
   12278 			if (sc->sc_type >= WM_T_PCH_SPT)
   12279 				hsfsts = ICH8_FLASH_READ32(sc,
   12280 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12281 			else
   12282 				hsfsts = ICH8_FLASH_READ16(sc,
   12283 				    ICH_FLASH_HSFSTS);
   12284 
   12285 			if (hsfsts & HSFSTS_ERR) {
   12286 				/* Repeat for some time before giving up. */
   12287 				continue;
   12288 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12289 				break;
   12290 		}
   12291 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12292 
   12293 	return error;
   12294 }
   12295 
   12296 /******************************************************************************
   12297  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12298  *
   12299  * sc - pointer to wm_hw structure
   12300  * index - The index of the byte to read.
   12301  * data - Pointer to a byte to store the value read.
   12302  *****************************************************************************/
   12303 static int32_t
   12304 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12305 {
   12306 	int32_t status;
   12307 	uint32_t word = 0;
   12308 
   12309 	status = wm_read_ich8_data(sc, index, 1, &word);
   12310 	if (status == 0)
   12311 		*data = (uint8_t)word;
   12312 	else
   12313 		*data = 0;
   12314 
   12315 	return status;
   12316 }
   12317 
   12318 /******************************************************************************
   12319  * Reads a word from the NVM using the ICH8 flash access registers.
   12320  *
   12321  * sc - pointer to wm_hw structure
   12322  * index - The starting byte index of the word to read.
   12323  * data - Pointer to a word to store the value read.
   12324  *****************************************************************************/
   12325 static int32_t
   12326 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12327 {
   12328 	int32_t status;
   12329 	uint32_t word = 0;
   12330 
   12331 	status = wm_read_ich8_data(sc, index, 2, &word);
   12332 	if (status == 0)
   12333 		*data = (uint16_t)word;
   12334 	else
   12335 		*data = 0;
   12336 
   12337 	return status;
   12338 }
   12339 
   12340 /******************************************************************************
   12341  * Reads a dword from the NVM using the ICH8 flash access registers.
   12342  *
   12343  * sc - pointer to wm_hw structure
   12344  * index - The starting byte index of the word to read.
   12345  * data - Pointer to a word to store the value read.
   12346  *****************************************************************************/
   12347 static int32_t
   12348 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12349 {
   12350 	int32_t status;
   12351 
   12352 	status = wm_read_ich8_data(sc, index, 4, data);
   12353 	return status;
   12354 }
   12355 
   12356 /******************************************************************************
   12357  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12358  * register.
   12359  *
   12360  * sc - Struct containing variables accessed by shared code
   12361  * offset - offset of word in the EEPROM to read
   12362  * data - word read from the EEPROM
   12363  * words - number of words to read
   12364  *****************************************************************************/
   12365 static int
   12366 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12367 {
   12368 	int32_t  rv = 0;
   12369 	uint32_t flash_bank = 0;
   12370 	uint32_t act_offset = 0;
   12371 	uint32_t bank_offset = 0;
   12372 	uint16_t word = 0;
   12373 	uint16_t i = 0;
   12374 
   12375 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12376 		device_xname(sc->sc_dev), __func__));
   12377 
   12378 	if (sc->nvm.acquire(sc) != 0)
   12379 		return -1;
   12380 
   12381 	/*
   12382 	 * We need to know which is the valid flash bank.  In the event
   12383 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12384 	 * managing flash_bank.  So it cannot be trusted and needs
   12385 	 * to be updated with each read.
   12386 	 */
   12387 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12388 	if (rv) {
   12389 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12390 			device_xname(sc->sc_dev)));
   12391 		flash_bank = 0;
   12392 	}
   12393 
   12394 	/*
   12395 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12396 	 * size
   12397 	 */
   12398 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12399 
   12400 	for (i = 0; i < words; i++) {
   12401 		/* The NVM part needs a byte offset, hence * 2 */
   12402 		act_offset = bank_offset + ((offset + i) * 2);
   12403 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12404 		if (rv) {
   12405 			aprint_error_dev(sc->sc_dev,
   12406 			    "%s: failed to read NVM\n", __func__);
   12407 			break;
   12408 		}
   12409 		data[i] = word;
   12410 	}
   12411 
   12412 	sc->nvm.release(sc);
   12413 	return rv;
   12414 }
   12415 
   12416 /******************************************************************************
   12417  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12418  * register.
   12419  *
   12420  * sc - Struct containing variables accessed by shared code
   12421  * offset - offset of word in the EEPROM to read
   12422  * data - word read from the EEPROM
   12423  * words - number of words to read
   12424  *****************************************************************************/
   12425 static int
   12426 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12427 {
   12428 	int32_t  rv = 0;
   12429 	uint32_t flash_bank = 0;
   12430 	uint32_t act_offset = 0;
   12431 	uint32_t bank_offset = 0;
   12432 	uint32_t dword = 0;
   12433 	uint16_t i = 0;
   12434 
   12435 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12436 		device_xname(sc->sc_dev), __func__));
   12437 
   12438 	if (sc->nvm.acquire(sc) != 0)
   12439 		return -1;
   12440 
   12441 	/*
   12442 	 * We need to know which is the valid flash bank.  In the event
   12443 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12444 	 * managing flash_bank.  So it cannot be trusted and needs
   12445 	 * to be updated with each read.
   12446 	 */
   12447 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12448 	if (rv) {
   12449 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12450 			device_xname(sc->sc_dev)));
   12451 		flash_bank = 0;
   12452 	}
   12453 
   12454 	/*
   12455 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12456 	 * size
   12457 	 */
   12458 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12459 
   12460 	for (i = 0; i < words; i++) {
   12461 		/* The NVM part needs a byte offset, hence * 2 */
   12462 		act_offset = bank_offset + ((offset + i) * 2);
   12463 		/* but we must read dword aligned, so mask ... */
   12464 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12465 		if (rv) {
   12466 			aprint_error_dev(sc->sc_dev,
   12467 			    "%s: failed to read NVM\n", __func__);
   12468 			break;
   12469 		}
   12470 		/* ... and pick out low or high word */
   12471 		if ((act_offset & 0x2) == 0)
   12472 			data[i] = (uint16_t)(dword & 0xFFFF);
   12473 		else
   12474 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12475 	}
   12476 
   12477 	sc->nvm.release(sc);
   12478 	return rv;
   12479 }
   12480 
   12481 /* iNVM */
   12482 
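/*
 * The i210/i211 iNVM is an array of one-time-programmable dwords read
 * through WM_INVM_DATA_REG().  Each dword is a record whose type field
 * selects its interpretation; word-autoload records bind a word
 * address to a 16-bit data value.
 */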
   12483 static int
   12484 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12485 {
    12486 	int32_t  rv = -1;
   12487 	uint32_t invm_dword;
   12488 	uint16_t i;
   12489 	uint8_t record_type, word_address;
   12490 
   12491 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12492 		device_xname(sc->sc_dev), __func__));
   12493 
   12494 	for (i = 0; i < INVM_SIZE; i++) {
   12495 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12496 		/* Get record type */
   12497 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12498 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12499 			break;
   12500 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12501 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12502 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12503 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12504 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12505 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12506 			if (word_address == address) {
   12507 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12508 				rv = 0;
   12509 				break;
   12510 			}
   12511 		}
   12512 	}
   12513 
   12514 	return rv;
   12515 }
   12516 
   12517 static int
   12518 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12519 {
   12520 	int rv = 0;
   12521 	int i;
   12522 
   12523 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12524 		device_xname(sc->sc_dev), __func__));
   12525 
   12526 	if (sc->nvm.acquire(sc) != 0)
   12527 		return -1;
   12528 
   12529 	for (i = 0; i < words; i++) {
   12530 		switch (offset + i) {
   12531 		case NVM_OFF_MACADDR:
   12532 		case NVM_OFF_MACADDR1:
   12533 		case NVM_OFF_MACADDR2:
   12534 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12535 			if (rv != 0) {
   12536 				data[i] = 0xffff;
   12537 				rv = -1;
   12538 			}
   12539 			break;
   12540 		case NVM_OFF_CFG2:
   12541 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12542 			if (rv != 0) {
   12543 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12544 				rv = 0;
   12545 			}
   12546 			break;
   12547 		case NVM_OFF_CFG4:
   12548 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12549 			if (rv != 0) {
   12550 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12551 				rv = 0;
   12552 			}
   12553 			break;
   12554 		case NVM_OFF_LED_1_CFG:
   12555 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12556 			if (rv != 0) {
   12557 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12558 				rv = 0;
   12559 			}
   12560 			break;
   12561 		case NVM_OFF_LED_0_2_CFG:
   12562 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12563 			if (rv != 0) {
   12564 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12565 				rv = 0;
   12566 			}
   12567 			break;
   12568 		case NVM_OFF_ID_LED_SETTINGS:
   12569 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12570 			if (rv != 0) {
   12571 				*data = ID_LED_RESERVED_FFFF;
   12572 				rv = 0;
   12573 			}
   12574 			break;
   12575 		default:
   12576 			DPRINTF(WM_DEBUG_NVM,
   12577 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12578 			*data = NVM_RESERVED_WORD;
   12579 			break;
   12580 		}
   12581 	}
   12582 
   12583 	sc->nvm.release(sc);
   12584 	return rv;
   12585 }
   12586 
   12587 /* Lock, detecting NVM type, validate checksum, version and read */
   12588 
   12589 static int
   12590 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12591 {
   12592 	uint32_t eecd = 0;
   12593 
   12594 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12595 	    || sc->sc_type == WM_T_82583) {
   12596 		eecd = CSR_READ(sc, WMREG_EECD);
   12597 
   12598 		/* Isolate bits 15 & 16 */
   12599 		eecd = ((eecd >> 15) & 0x03);
   12600 
   12601 		/* If both bits are set, device is Flash type */
   12602 		if (eecd == 0x03)
   12603 			return 0;
   12604 	}
   12605 	return 1;
   12606 }
   12607 
   12608 static int
   12609 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12610 {
   12611 	uint32_t eec;
   12612 
   12613 	eec = CSR_READ(sc, WMREG_EEC);
   12614 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12615 		return 1;
   12616 
   12617 	return 0;
   12618 }
   12619 
   12620 /*
   12621  * wm_nvm_validate_checksum
   12622  *
   12623  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12624  */
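/* That is, words 0x00 through NVM_SIZE - 1 must sum to NVM_CHECKSUM. */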
   12625 static int
   12626 wm_nvm_validate_checksum(struct wm_softc *sc)
   12627 {
   12628 	uint16_t checksum;
   12629 	uint16_t eeprom_data;
   12630 #ifdef WM_DEBUG
   12631 	uint16_t csum_wordaddr, valid_checksum;
   12632 #endif
   12633 	int i;
   12634 
   12635 	checksum = 0;
   12636 
   12637 	/* Don't check for I211 */
   12638 	if (sc->sc_type == WM_T_I211)
   12639 		return 0;
   12640 
   12641 #ifdef WM_DEBUG
   12642 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12643 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12644 		csum_wordaddr = NVM_OFF_COMPAT;
   12645 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12646 	} else {
   12647 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12648 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12649 	}
   12650 
   12651 	/* Dump EEPROM image for debug */
   12652 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12653 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12654 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12655 		/* XXX PCH_SPT? */
   12656 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12657 		if ((eeprom_data & valid_checksum) == 0) {
   12658 			DPRINTF(WM_DEBUG_NVM,
   12659 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12660 				device_xname(sc->sc_dev), eeprom_data,
   12661 				    valid_checksum));
   12662 		}
   12663 	}
   12664 
   12665 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12666 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12667 		for (i = 0; i < NVM_SIZE; i++) {
   12668 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12669 				printf("XXXX ");
   12670 			else
   12671 				printf("%04hx ", eeprom_data);
   12672 			if (i % 8 == 7)
   12673 				printf("\n");
   12674 		}
   12675 	}
   12676 
   12677 #endif /* WM_DEBUG */
   12678 
   12679 	for (i = 0; i < NVM_SIZE; i++) {
   12680 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12681 			return 1;
   12682 		checksum += eeprom_data;
   12683 	}
   12684 
   12685 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12686 #ifdef WM_DEBUG
   12687 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12688 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12689 #endif
   12690 	}
   12691 
   12692 	return 0;
   12693 }
   12694 
   12695 static void
   12696 wm_nvm_version_invm(struct wm_softc *sc)
   12697 {
   12698 	uint32_t dword;
   12699 
   12700 	/*
    12701 	 * Linux's code to decode the version is very strange, so we
    12702 	 * don't follow that algorithm and just decode word 61 as the
    12703 	 * documentation describes.  Perhaps it's not perfect, though...
   12704 	 *
   12705 	 * Example:
   12706 	 *
   12707 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12708 	 */
   12709 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12710 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12711 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12712 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12713 }
   12714 
   12715 static void
   12716 wm_nvm_version(struct wm_softc *sc)
   12717 {
   12718 	uint16_t major, minor, build, patch;
   12719 	uint16_t uid0, uid1;
   12720 	uint16_t nvm_data;
   12721 	uint16_t off;
   12722 	bool check_version = false;
   12723 	bool check_optionrom = false;
   12724 	bool have_build = false;
   12725 	bool have_uid = true;
   12726 
   12727 	/*
   12728 	 * Version format:
   12729 	 *
   12730 	 * XYYZ
   12731 	 * X0YZ
   12732 	 * X0YY
   12733 	 *
   12734 	 * Example:
   12735 	 *
   12736 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12737 	 *	82571	0x50a6	5.10.6?
   12738 	 *	82572	0x506a	5.6.10?
   12739 	 *	82572EI	0x5069	5.6.9?
   12740 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12741 	 *		0x2013	2.1.3?
    12742 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12743 	 */
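	/*
	 * Worked example of the XYYZ form, taking 0x5069 from the table
	 * above: major = 0x5, minor = 0x06 (decoded from BCD to 6) and
	 * build = 0x9, i.e. version 5.6.9.
	 */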
   12744 
   12745 	/*
   12746 	 * XXX
    12747 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    12748 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12749 	 */
   12750 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12751 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12752 		have_uid = false;
   12753 
   12754 	switch (sc->sc_type) {
   12755 	case WM_T_82571:
   12756 	case WM_T_82572:
   12757 	case WM_T_82574:
   12758 	case WM_T_82583:
   12759 		check_version = true;
   12760 		check_optionrom = true;
   12761 		have_build = true;
   12762 		break;
   12763 	case WM_T_82575:
   12764 	case WM_T_82576:
   12765 	case WM_T_82580:
   12766 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12767 			check_version = true;
   12768 		break;
   12769 	case WM_T_I211:
   12770 		wm_nvm_version_invm(sc);
   12771 		have_uid = false;
   12772 		goto printver;
   12773 	case WM_T_I210:
   12774 		if (!wm_nvm_flash_presence_i210(sc)) {
   12775 			wm_nvm_version_invm(sc);
   12776 			have_uid = false;
   12777 			goto printver;
   12778 		}
   12779 		/* FALLTHROUGH */
   12780 	case WM_T_I350:
   12781 	case WM_T_I354:
   12782 		check_version = true;
   12783 		check_optionrom = true;
   12784 		break;
   12785 	default:
   12786 		return;
   12787 	}
   12788 	if (check_version
   12789 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12790 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12791 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12792 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12793 			build = nvm_data & NVM_BUILD_MASK;
   12794 			have_build = true;
   12795 		} else
   12796 			minor = nvm_data & 0x00ff;
   12797 
   12798 		/* Decimal */
   12799 		minor = (minor / 16) * 10 + (minor % 16);
   12800 		sc->sc_nvm_ver_major = major;
   12801 		sc->sc_nvm_ver_minor = minor;
   12802 
   12803 printver:
   12804 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12805 		    sc->sc_nvm_ver_minor);
   12806 		if (have_build) {
   12807 			sc->sc_nvm_ver_build = build;
   12808 			aprint_verbose(".%d", build);
   12809 		}
   12810 	}
   12811 
    12812 	/* Assume the Option ROM area is above NVM_SIZE */
   12813 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12814 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12815 		/* Option ROM Version */
   12816 		if ((off != 0x0000) && (off != 0xffff)) {
   12817 			int rv;
   12818 
   12819 			off += NVM_COMBO_VER_OFF;
   12820 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12821 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12822 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12823 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12824 				/* 16bits */
   12825 				major = uid0 >> 8;
   12826 				build = (uid0 << 8) | (uid1 >> 8);
   12827 				patch = uid1 & 0x00ff;
   12828 				aprint_verbose(", option ROM Version %d.%d.%d",
   12829 				    major, build, patch);
   12830 			}
   12831 		}
   12832 	}
   12833 
   12834 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12835 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12836 }
   12837 
   12838 /*
   12839  * wm_nvm_read:
   12840  *
   12841  *	Read data from the serial EEPROM.
   12842  */
   12843 static int
   12844 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12845 {
   12846 	int rv;
   12847 
   12848 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12849 		device_xname(sc->sc_dev), __func__));
   12850 
   12851 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12852 		return -1;
   12853 
   12854 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12855 
   12856 	return rv;
   12857 }
   12858 
   12859 /*
   12860  * Hardware semaphores.
    12861  * Very complex...
   12862  */
   12863 
   12864 static int
   12865 wm_get_null(struct wm_softc *sc)
   12866 {
   12867 
   12868 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12869 		device_xname(sc->sc_dev), __func__));
   12870 	return 0;
   12871 }
   12872 
   12873 static void
   12874 wm_put_null(struct wm_softc *sc)
   12875 {
   12876 
   12877 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12878 		device_xname(sc->sc_dev), __func__));
   12879 	return;
   12880 }
   12881 
   12882 static int
   12883 wm_get_eecd(struct wm_softc *sc)
   12884 {
   12885 	uint32_t reg;
   12886 	int x;
   12887 
   12888 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12889 		device_xname(sc->sc_dev), __func__));
   12890 
   12891 	reg = CSR_READ(sc, WMREG_EECD);
   12892 
   12893 	/* Request EEPROM access. */
   12894 	reg |= EECD_EE_REQ;
   12895 	CSR_WRITE(sc, WMREG_EECD, reg);
   12896 
    12897 	/* ... and wait for it to be granted. */
   12898 	for (x = 0; x < 1000; x++) {
   12899 		reg = CSR_READ(sc, WMREG_EECD);
   12900 		if (reg & EECD_EE_GNT)
   12901 			break;
   12902 		delay(5);
   12903 	}
   12904 	if ((reg & EECD_EE_GNT) == 0) {
   12905 		aprint_error_dev(sc->sc_dev,
   12906 		    "could not acquire EEPROM GNT\n");
   12907 		reg &= ~EECD_EE_REQ;
   12908 		CSR_WRITE(sc, WMREG_EECD, reg);
   12909 		return -1;
   12910 	}
   12911 
   12912 	return 0;
   12913 }
   12914 
   12915 static void
   12916 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12917 {
   12918 
   12919 	*eecd |= EECD_SK;
   12920 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12921 	CSR_WRITE_FLUSH(sc);
   12922 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12923 		delay(1);
   12924 	else
   12925 		delay(50);
   12926 }
   12927 
   12928 static void
   12929 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12930 {
   12931 
   12932 	*eecd &= ~EECD_SK;
   12933 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12934 	CSR_WRITE_FLUSH(sc);
   12935 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12936 		delay(1);
   12937 	else
   12938 		delay(50);
   12939 }
   12940 
   12941 static void
   12942 wm_put_eecd(struct wm_softc *sc)
   12943 {
   12944 	uint32_t reg;
   12945 
   12946 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12947 		device_xname(sc->sc_dev), __func__));
   12948 
   12949 	/* Stop nvm */
   12950 	reg = CSR_READ(sc, WMREG_EECD);
   12951 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12952 		/* Pull CS high */
   12953 		reg |= EECD_CS;
   12954 		wm_nvm_eec_clock_lower(sc, &reg);
   12955 	} else {
   12956 		/* CS on Microwire is active-high */
   12957 		reg &= ~(EECD_CS | EECD_DI);
   12958 		CSR_WRITE(sc, WMREG_EECD, reg);
   12959 		wm_nvm_eec_clock_raise(sc, &reg);
   12960 		wm_nvm_eec_clock_lower(sc, &reg);
   12961 	}
   12962 
   12963 	reg = CSR_READ(sc, WMREG_EECD);
   12964 	reg &= ~EECD_EE_REQ;
   12965 	CSR_WRITE(sc, WMREG_EECD, reg);
   12966 
   12967 	return;
   12968 }
   12969 
   12970 /*
   12971  * Get hardware semaphore.
   12972  * Same as e1000_get_hw_semaphore_generic()
   12973  */
   12974 static int
   12975 wm_get_swsm_semaphore(struct wm_softc *sc)
   12976 {
   12977 	int32_t timeout;
   12978 	uint32_t swsm;
   12979 
   12980 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12981 		device_xname(sc->sc_dev), __func__));
   12982 	KASSERT(sc->sc_nvm_wordsize > 0);
   12983 
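	/*
	 * Two-stage acquisition: first wait for the SMBI (software
	 * semaphore) bit to clear, then set SWESMBI and read it back to
	 * arbitrate with firmware.  Each stage polls up to
	 * sc_nvm_wordsize + 1 times with a 50us delay.
	 */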
   12984 retry:
   12985 	/* Get the SW semaphore. */
   12986 	timeout = sc->sc_nvm_wordsize + 1;
   12987 	while (timeout) {
   12988 		swsm = CSR_READ(sc, WMREG_SWSM);
   12989 
   12990 		if ((swsm & SWSM_SMBI) == 0)
   12991 			break;
   12992 
   12993 		delay(50);
   12994 		timeout--;
   12995 	}
   12996 
   12997 	if (timeout == 0) {
   12998 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12999 			/*
   13000 			 * In rare circumstances, the SW semaphore may already
   13001 			 * be held unintentionally. Clear the semaphore once
   13002 			 * before giving up.
   13003 			 */
   13004 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13005 			wm_put_swsm_semaphore(sc);
   13006 			goto retry;
   13007 		}
   13008 		aprint_error_dev(sc->sc_dev,
   13009 		    "could not acquire SWSM SMBI\n");
   13010 		return 1;
   13011 	}
   13012 
   13013 	/* Get the FW semaphore. */
   13014 	timeout = sc->sc_nvm_wordsize + 1;
   13015 	while (timeout) {
   13016 		swsm = CSR_READ(sc, WMREG_SWSM);
   13017 		swsm |= SWSM_SWESMBI;
   13018 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13019 		/* If we managed to set the bit we got the semaphore. */
   13020 		swsm = CSR_READ(sc, WMREG_SWSM);
   13021 		if (swsm & SWSM_SWESMBI)
   13022 			break;
   13023 
   13024 		delay(50);
   13025 		timeout--;
   13026 	}
   13027 
   13028 	if (timeout == 0) {
   13029 		aprint_error_dev(sc->sc_dev,
   13030 		    "could not acquire SWSM SWESMBI\n");
   13031 		/* Release semaphores */
   13032 		wm_put_swsm_semaphore(sc);
   13033 		return 1;
   13034 	}
   13035 	return 0;
   13036 }
   13037 
   13038 /*
   13039  * Put hardware semaphore.
   13040  * Same as e1000_put_hw_semaphore_generic()
   13041  */
   13042 static void
   13043 wm_put_swsm_semaphore(struct wm_softc *sc)
   13044 {
   13045 	uint32_t swsm;
   13046 
   13047 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13048 		device_xname(sc->sc_dev), __func__));
   13049 
   13050 	swsm = CSR_READ(sc, WMREG_SWSM);
   13051 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13052 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13053 }
   13054 
   13055 /*
   13056  * Get SW/FW semaphore.
   13057  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13058  */
   13059 static int
   13060 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13061 {
   13062 	uint32_t swfw_sync;
   13063 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13064 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13065 	int timeout;
   13066 
   13067 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13068 		device_xname(sc->sc_dev), __func__));
   13069 
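	/*
	 * SW_FW_SYNC holds paired software/firmware ownership bits; the
	 * resource is free only when both the SW and FW bits for this mask
	 * are clear.  Access to SW_FW_SYNC itself is arbitrated by the
	 * SWSM semaphore taken in the loop below.
	 */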
   13070 	if (sc->sc_type == WM_T_80003)
   13071 		timeout = 50;
   13072 	else
   13073 		timeout = 200;
   13074 
   13075 	while (timeout) {
   13076 		if (wm_get_swsm_semaphore(sc)) {
   13077 			aprint_error_dev(sc->sc_dev,
   13078 			    "%s: failed to get semaphore\n",
   13079 			    __func__);
   13080 			return 1;
   13081 		}
   13082 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13083 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13084 			swfw_sync |= swmask;
   13085 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13086 			wm_put_swsm_semaphore(sc);
   13087 			return 0;
   13088 		}
   13089 		wm_put_swsm_semaphore(sc);
   13090 		delay(5000);
   13091 		timeout--;
   13092 	}
   13093 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13094 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13095 	return 1;
   13096 }
   13097 
   13098 static void
   13099 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13100 {
   13101 	uint32_t swfw_sync;
   13102 
   13103 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13104 		device_xname(sc->sc_dev), __func__));
   13105 
   13106 	while (wm_get_swsm_semaphore(sc) != 0)
   13107 		continue;
   13108 
   13109 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13110 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13111 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13112 
   13113 	wm_put_swsm_semaphore(sc);
   13114 }
   13115 
   13116 static int
   13117 wm_get_nvm_80003(struct wm_softc *sc)
   13118 {
   13119 	int rv;
   13120 
   13121 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13122 		device_xname(sc->sc_dev), __func__));
   13123 
   13124 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13125 		aprint_error_dev(sc->sc_dev,
   13126 		    "%s: failed to get semaphore(SWFW)\n",
   13127 		    __func__);
   13128 		return rv;
   13129 	}
   13130 
   13131 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13132 	    && (rv = wm_get_eecd(sc)) != 0) {
   13133 		aprint_error_dev(sc->sc_dev,
   13134 		    "%s: failed to get semaphore(EECD)\n",
   13135 		    __func__);
   13136 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13137 		return rv;
   13138 	}
   13139 
   13140 	return 0;
   13141 }
   13142 
   13143 static void
   13144 wm_put_nvm_80003(struct wm_softc *sc)
   13145 {
   13146 
   13147 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13148 		device_xname(sc->sc_dev), __func__));
   13149 
   13150 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13151 		wm_put_eecd(sc);
   13152 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13153 }
   13154 
   13155 static int
   13156 wm_get_nvm_82571(struct wm_softc *sc)
   13157 {
   13158 	int rv;
   13159 
   13160 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13161 		device_xname(sc->sc_dev), __func__));
   13162 
   13163 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13164 		return rv;
   13165 
   13166 	switch (sc->sc_type) {
   13167 	case WM_T_82573:
   13168 		break;
   13169 	default:
   13170 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13171 			rv = wm_get_eecd(sc);
   13172 		break;
   13173 	}
   13174 
   13175 	if (rv != 0) {
   13176 		aprint_error_dev(sc->sc_dev,
   13177 		    "%s: failed to get semaphore\n",
   13178 		    __func__);
   13179 		wm_put_swsm_semaphore(sc);
   13180 	}
   13181 
   13182 	return rv;
   13183 }
   13184 
   13185 static void
   13186 wm_put_nvm_82571(struct wm_softc *sc)
   13187 {
   13188 
   13189 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13190 		device_xname(sc->sc_dev), __func__));
   13191 
   13192 	switch (sc->sc_type) {
   13193 	case WM_T_82573:
   13194 		break;
   13195 	default:
   13196 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13197 			wm_put_eecd(sc);
   13198 		break;
   13199 	}
   13200 
   13201 	wm_put_swsm_semaphore(sc);
   13202 }
   13203 
   13204 static int
   13205 wm_get_phy_82575(struct wm_softc *sc)
   13206 {
   13207 
   13208 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13209 		device_xname(sc->sc_dev), __func__));
   13210 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13211 }
   13212 
   13213 static void
   13214 wm_put_phy_82575(struct wm_softc *sc)
   13215 {
   13216 
   13217 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13218 		device_xname(sc->sc_dev), __func__));
   13219 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13220 }
   13221 
   13222 static int
   13223 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13224 {
   13225 	uint32_t ext_ctrl;
    13226 	int timeout;
   13227 
   13228 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13229 		device_xname(sc->sc_dev), __func__));
   13230 
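	/*
	 * Take ownership by setting EXTCNFCTR.MDIO_SW_OWNERSHIP and
	 * reading it back; retry for up to 200 * 5ms = 1s.
	 */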
   13231 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13232 	for (timeout = 0; timeout < 200; timeout++) {
   13233 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13234 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13235 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13236 
   13237 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13238 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13239 			return 0;
   13240 		delay(5000);
   13241 	}
   13242 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13243 	    device_xname(sc->sc_dev), ext_ctrl);
   13244 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13245 	return 1;
   13246 }
   13247 
   13248 static void
   13249 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13250 {
   13251 	uint32_t ext_ctrl;
   13252 
   13253 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13254 		device_xname(sc->sc_dev), __func__));
   13255 
   13256 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13257 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13258 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13259 
   13260 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13261 }
   13262 
   13263 static int
   13264 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13265 {
   13266 	uint32_t ext_ctrl;
   13267 	int timeout;
   13268 
   13269 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13270 		device_xname(sc->sc_dev), __func__));
   13271 	mutex_enter(sc->sc_ich_phymtx);
   13272 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13273 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13274 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13275 			break;
   13276 		delay(1000);
   13277 	}
   13278 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13279 		printf("%s: SW has already locked the resource\n",
   13280 		    device_xname(sc->sc_dev));
   13281 		goto out;
   13282 	}
   13283 
   13284 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13285 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13286 	for (timeout = 0; timeout < 1000; timeout++) {
   13287 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13288 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13289 			break;
   13290 		delay(1000);
   13291 	}
   13292 	if (timeout >= 1000) {
   13293 		printf("%s: failed to acquire semaphore\n",
   13294 		    device_xname(sc->sc_dev));
   13295 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13296 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13297 		goto out;
   13298 	}
   13299 	return 0;
   13300 
   13301 out:
   13302 	mutex_exit(sc->sc_ich_phymtx);
   13303 	return 1;
   13304 }
   13305 
   13306 static void
   13307 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13308 {
   13309 	uint32_t ext_ctrl;
   13310 
   13311 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13312 		device_xname(sc->sc_dev), __func__));
   13313 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13314 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13315 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13316 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13317 	} else {
   13318 		printf("%s: Semaphore unexpectedly released\n",
   13319 		    device_xname(sc->sc_dev));
   13320 	}
   13321 
   13322 	mutex_exit(sc->sc_ich_phymtx);
   13323 }
   13324 
   13325 static int
   13326 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13327 {
   13328 
   13329 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13330 		device_xname(sc->sc_dev), __func__));
   13331 	mutex_enter(sc->sc_ich_nvmmtx);
   13332 
   13333 	return 0;
   13334 }
   13335 
   13336 static void
   13337 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13338 {
   13339 
   13340 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13341 		device_xname(sc->sc_dev), __func__));
   13342 	mutex_exit(sc->sc_ich_nvmmtx);
   13343 }
   13344 
   13345 static int
   13346 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13347 {
   13348 	int i = 0;
   13349 	uint32_t reg;
   13350 
   13351 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13352 		device_xname(sc->sc_dev), __func__));
   13353 
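	/*
	 * Poll for EXTCNFCTR.MDIO_SW_OWNERSHIP to stick, waiting 2ms
	 * between attempts, up to WM_MDIO_OWNERSHIP_TIMEOUT tries.
	 */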
   13354 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13355 	do {
   13356 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13357 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13358 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13359 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13360 			break;
   13361 		delay(2*1000);
   13362 		i++;
   13363 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13364 
   13365 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13366 		wm_put_hw_semaphore_82573(sc);
   13367 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13368 		    device_xname(sc->sc_dev));
   13369 		return -1;
   13370 	}
   13371 
   13372 	return 0;
   13373 }
   13374 
   13375 static void
   13376 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13377 {
   13378 	uint32_t reg;
   13379 
   13380 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13381 		device_xname(sc->sc_dev), __func__));
   13382 
   13383 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13384 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13385 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13386 }
   13387 
   13388 /*
   13389  * Management mode and power management related subroutines.
   13390  * BMC, AMT, suspend/resume and EEE.
   13391  */
   13392 
   13393 #ifdef WM_WOL
   13394 static int
   13395 wm_check_mng_mode(struct wm_softc *sc)
   13396 {
   13397 	int rv;
   13398 
   13399 	switch (sc->sc_type) {
   13400 	case WM_T_ICH8:
   13401 	case WM_T_ICH9:
   13402 	case WM_T_ICH10:
   13403 	case WM_T_PCH:
   13404 	case WM_T_PCH2:
   13405 	case WM_T_PCH_LPT:
   13406 	case WM_T_PCH_SPT:
   13407 	case WM_T_PCH_CNP:
   13408 		rv = wm_check_mng_mode_ich8lan(sc);
   13409 		break;
   13410 	case WM_T_82574:
   13411 	case WM_T_82583:
   13412 		rv = wm_check_mng_mode_82574(sc);
   13413 		break;
   13414 	case WM_T_82571:
   13415 	case WM_T_82572:
   13416 	case WM_T_82573:
   13417 	case WM_T_80003:
   13418 		rv = wm_check_mng_mode_generic(sc);
   13419 		break;
   13420 	default:
    13421 		/* Nothing to do */
   13422 		rv = 0;
   13423 		break;
   13424 	}
   13425 
   13426 	return rv;
   13427 }
   13428 
   13429 static int
   13430 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13431 {
   13432 	uint32_t fwsm;
   13433 
   13434 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13435 
   13436 	if (((fwsm & FWSM_FW_VALID) != 0)
   13437 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13438 		return 1;
   13439 
   13440 	return 0;
   13441 }
   13442 
   13443 static int
   13444 wm_check_mng_mode_82574(struct wm_softc *sc)
   13445 {
   13446 	uint16_t data;
   13447 
   13448 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13449 
   13450 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13451 		return 1;
   13452 
   13453 	return 0;
   13454 }
   13455 
   13456 static int
   13457 wm_check_mng_mode_generic(struct wm_softc *sc)
   13458 {
   13459 	uint32_t fwsm;
   13460 
   13461 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13462 
   13463 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13464 		return 1;
   13465 
   13466 	return 0;
   13467 }
   13468 #endif /* WM_WOL */
   13469 
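/*
 * Check whether manageability pass-through is enabled, i.e. whether
 * firmware is present and set up to receive TCO management packets
 * through the host interface.  Returns 1 if enabled, 0 otherwise.
 */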
   13470 static int
   13471 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13472 {
   13473 	uint32_t manc, fwsm, factps;
   13474 
   13475 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13476 		return 0;
   13477 
   13478 	manc = CSR_READ(sc, WMREG_MANC);
   13479 
   13480 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13481 		device_xname(sc->sc_dev), manc));
   13482 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13483 		return 0;
   13484 
   13485 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13486 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13487 		factps = CSR_READ(sc, WMREG_FACTPS);
   13488 		if (((factps & FACTPS_MNGCG) == 0)
   13489 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13490 			return 1;
    13491 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13492 		uint16_t data;
   13493 
   13494 		factps = CSR_READ(sc, WMREG_FACTPS);
   13495 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13496 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13497 			device_xname(sc->sc_dev), factps, data));
   13498 		if (((factps & FACTPS_MNGCG) == 0)
   13499 		    && ((data & NVM_CFG2_MNGM_MASK)
   13500 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13501 			return 1;
   13502 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13503 	    && ((manc & MANC_ASF_EN) == 0))
   13504 		return 1;
   13505 
   13506 	return 0;
   13507 }
   13508 
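/*
 * Check whether a PHY reset is currently blocked by manageability
 * firmware: FWSM_RSPCIPHY on ICH/PCH (polled up to 30 times at 10ms),
 * MANC_BLK_PHY_RST_ON_IDE on the 8257x/80003 types.
 */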
   13509 static bool
   13510 wm_phy_resetisblocked(struct wm_softc *sc)
   13511 {
   13512 	bool blocked = false;
   13513 	uint32_t reg;
   13514 	int i = 0;
   13515 
   13516 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13517 		device_xname(sc->sc_dev), __func__));
   13518 
   13519 	switch (sc->sc_type) {
   13520 	case WM_T_ICH8:
   13521 	case WM_T_ICH9:
   13522 	case WM_T_ICH10:
   13523 	case WM_T_PCH:
   13524 	case WM_T_PCH2:
   13525 	case WM_T_PCH_LPT:
   13526 	case WM_T_PCH_SPT:
   13527 	case WM_T_PCH_CNP:
   13528 		do {
   13529 			reg = CSR_READ(sc, WMREG_FWSM);
   13530 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13531 				blocked = true;
   13532 				delay(10*1000);
   13533 				continue;
   13534 			}
   13535 			blocked = false;
   13536 		} while (blocked && (i++ < 30));
   13537 		return blocked;
   13539 	case WM_T_82571:
   13540 	case WM_T_82572:
   13541 	case WM_T_82573:
   13542 	case WM_T_82574:
   13543 	case WM_T_82583:
   13544 	case WM_T_80003:
   13545 		reg = CSR_READ(sc, WMREG_MANC);
   13546 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13547 			return true;
   13548 		else
   13549 			return false;
   13551 	default:
    13552 		/* Reset is never blocked on other types */
   13553 		break;
   13554 	}
   13555 
   13556 	return false;
   13557 }
   13558 
   13559 static void
   13560 wm_get_hw_control(struct wm_softc *sc)
   13561 {
   13562 	uint32_t reg;
   13563 
   13564 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13565 		device_xname(sc->sc_dev), __func__));
   13566 
   13567 	if (sc->sc_type == WM_T_82573) {
   13568 		reg = CSR_READ(sc, WMREG_SWSM);
   13569 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13570 	} else if (sc->sc_type >= WM_T_82571) {
   13571 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13572 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13573 	}
   13574 }
   13575 
   13576 static void
   13577 wm_release_hw_control(struct wm_softc *sc)
   13578 {
   13579 	uint32_t reg;
   13580 
   13581 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13582 		device_xname(sc->sc_dev), __func__));
   13583 
   13584 	if (sc->sc_type == WM_T_82573) {
   13585 		reg = CSR_READ(sc, WMREG_SWSM);
   13586 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13587 	} else if (sc->sc_type >= WM_T_82571) {
   13588 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13589 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13590 	}
   13591 }
   13592 
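/*
 * Gate or ungate automatic PHY configuration by hardware; only
 * meaningful on PCH2 (82579) and newer.
 */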
   13593 static void
   13594 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13595 {
   13596 	uint32_t reg;
   13597 
   13598 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13599 		device_xname(sc->sc_dev), __func__));
   13600 
   13601 	if (sc->sc_type < WM_T_PCH2)
   13602 		return;
   13603 
   13604 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13605 
   13606 	if (gate)
   13607 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13608 	else
   13609 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13610 
   13611 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13612 }
   13613 
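/*
 * Switch the PHY management interface from SMBus back to PCIe-based
 * MDIO so that the PHY becomes accessible again, toggling LANPHYPC
 * where needed.  Roughly corresponds to the PHY access workarounds in
 * Intel's e1000 PCH code.
 */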
   13614 static void
   13615 wm_smbustopci(struct wm_softc *sc)
   13616 {
   13617 	uint32_t fwsm, reg;
   13618 	int rv = 0;
   13619 
   13620 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13621 		device_xname(sc->sc_dev), __func__));
   13622 
   13623 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13624 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13625 
   13626 	/* Disable ULP */
   13627 	wm_ulp_disable(sc);
   13628 
   13629 	/* Acquire PHY semaphore */
   13630 	sc->phy.acquire(sc);
   13631 
   13632 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13633 	switch (sc->sc_type) {
   13634 	case WM_T_PCH_LPT:
   13635 	case WM_T_PCH_SPT:
   13636 	case WM_T_PCH_CNP:
   13637 		if (wm_phy_is_accessible_pchlan(sc))
   13638 			break;
   13639 
   13640 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13641 		reg |= CTRL_EXT_FORCE_SMBUS;
   13642 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13643 #if 0
   13644 		/* XXX Isn't this required??? */
   13645 		CSR_WRITE_FLUSH(sc);
   13646 #endif
   13647 		delay(50 * 1000);
   13648 		/* FALLTHROUGH */
   13649 	case WM_T_PCH2:
   13650 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13651 			break;
   13652 		/* FALLTHROUGH */
   13653 	case WM_T_PCH:
   13654 		if (sc->sc_type == WM_T_PCH)
   13655 			if ((fwsm & FWSM_FW_VALID) != 0)
   13656 				break;
   13657 
   13658 		if (wm_phy_resetisblocked(sc) == true) {
   13659 			printf("XXX reset is blocked(3)\n");
   13660 			break;
   13661 		}
   13662 
   13663 		wm_toggle_lanphypc_pch_lpt(sc);
   13664 
   13665 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13666 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13667 				break;
   13668 
   13669 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13670 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13671 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13672 
   13673 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13674 				break;
   13675 			rv = -1;
   13676 		}
   13677 		break;
   13678 	default:
   13679 		break;
   13680 	}
   13681 
   13682 	/* Release semaphore */
   13683 	sc->phy.release(sc);
   13684 
   13685 	if (rv == 0) {
   13686 		if (wm_phy_resetisblocked(sc)) {
   13687 			printf("XXX reset is blocked(4)\n");
   13688 			goto out;
   13689 		}
   13690 		wm_reset_phy(sc);
   13691 		if (wm_phy_resetisblocked(sc))
   13692 			printf("XXX reset is blocked(4)\n");
   13693 	}
   13694 
   13695 out:
   13696 	/*
   13697 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13698 	 */
   13699 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13700 		delay(10*1000);
   13701 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13702 	}
   13703 }
   13704 
   13705 static void
   13706 wm_init_manageability(struct wm_softc *sc)
   13707 {
   13708 
   13709 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13710 		device_xname(sc->sc_dev), __func__));
   13711 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13712 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13713 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13714 
   13715 		/* Disable hardware interception of ARP */
   13716 		manc &= ~MANC_ARP_EN;
   13717 
   13718 		/* Enable receiving management packets to the host */
   13719 		if (sc->sc_type >= WM_T_82571) {
   13720 			manc |= MANC_EN_MNG2HOST;
   13721 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13722 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13723 		}
   13724 
   13725 		CSR_WRITE(sc, WMREG_MANC, manc);
   13726 	}
   13727 }
   13728 
   13729 static void
   13730 wm_release_manageability(struct wm_softc *sc)
   13731 {
   13732 
   13733 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13734 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13735 
   13736 		manc |= MANC_ARP_EN;
   13737 		if (sc->sc_type >= WM_T_82571)
   13738 			manc &= ~MANC_EN_MNG2HOST;
   13739 
   13740 		CSR_WRITE(sc, WMREG_MANC, manc);
   13741 	}
   13742 }
   13743 
   13744 static void
   13745 wm_get_wakeup(struct wm_softc *sc)
   13746 {
   13747 
   13748 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13749 	switch (sc->sc_type) {
   13750 	case WM_T_82573:
   13751 	case WM_T_82583:
   13752 		sc->sc_flags |= WM_F_HAS_AMT;
   13753 		/* FALLTHROUGH */
   13754 	case WM_T_80003:
   13755 	case WM_T_82575:
   13756 	case WM_T_82576:
   13757 	case WM_T_82580:
   13758 	case WM_T_I350:
   13759 	case WM_T_I354:
   13760 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13761 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13762 		/* FALLTHROUGH */
   13763 	case WM_T_82541:
   13764 	case WM_T_82541_2:
   13765 	case WM_T_82547:
   13766 	case WM_T_82547_2:
   13767 	case WM_T_82571:
   13768 	case WM_T_82572:
   13769 	case WM_T_82574:
   13770 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13771 		break;
   13772 	case WM_T_ICH8:
   13773 	case WM_T_ICH9:
   13774 	case WM_T_ICH10:
   13775 	case WM_T_PCH:
   13776 	case WM_T_PCH2:
   13777 	case WM_T_PCH_LPT:
   13778 	case WM_T_PCH_SPT:
   13779 	case WM_T_PCH_CNP:
   13780 		sc->sc_flags |= WM_F_HAS_AMT;
   13781 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13782 		break;
   13783 	default:
   13784 		break;
   13785 	}
   13786 
   13787 	/* 1: HAS_MANAGE */
   13788 	if (wm_enable_mng_pass_thru(sc) != 0)
   13789 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13790 
    13791 	/*
    13792 	 * Note that the WOL flag is set after the EEPROM settings have
    13793 	 * been reset.
    13794 	 */
   13795 }
   13796 
   13797 /*
   13798  * Unconfigure Ultra Low Power mode.
   13799  * Only for I217 and newer (see below).
   13800  */
   13801 static void
   13802 wm_ulp_disable(struct wm_softc *sc)
   13803 {
   13804 	uint32_t reg;
   13805 	int i = 0;
   13806 
   13807 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13808 		device_xname(sc->sc_dev), __func__));
    13809 	/* Exclude old devices and devices without ULP support */
   13810 	if ((sc->sc_type < WM_T_PCH_LPT)
   13811 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13812 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13813 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13814 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13815 		return;
   13816 
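	/*
	 * Two exit paths: if ME firmware is alive, ask it to deconfigure
	 * ULP via H2ME and poll for FWSM_ULP_CFG_DONE; otherwise undo the
	 * ULP configuration by hand through the PHY registers below.
	 */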
   13817 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13818 		/* Request ME un-configure ULP mode in the PHY */
   13819 		reg = CSR_READ(sc, WMREG_H2ME);
   13820 		reg &= ~H2ME_ULP;
   13821 		reg |= H2ME_ENFORCE_SETTINGS;
   13822 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13823 
   13824 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13825 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13826 			if (i++ == 30) {
   13827 				printf("%s timed out\n", __func__);
   13828 				return;
   13829 			}
   13830 			delay(10 * 1000);
   13831 		}
   13832 		reg = CSR_READ(sc, WMREG_H2ME);
   13833 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13834 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13835 
   13836 		return;
   13837 	}
   13838 
   13839 	/* Acquire semaphore */
   13840 	sc->phy.acquire(sc);
   13841 
   13842 	/* Toggle LANPHYPC */
   13843 	wm_toggle_lanphypc_pch_lpt(sc);
   13844 
   13845 	/* Unforce SMBus mode in PHY */
   13846 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13847 	if (reg == 0x0000 || reg == 0xffff) {
   13848 		uint32_t reg2;
   13849 
   13850 		printf("%s: Force SMBus first.\n", __func__);
   13851 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13852 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13853 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13854 		delay(50 * 1000);
   13855 
   13856 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13857 	}
   13858 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13859 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13860 
   13861 	/* Unforce SMBus mode in MAC */
   13862 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13863 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13864 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13865 
   13866 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13867 	reg |= HV_PM_CTRL_K1_ENA;
   13868 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13869 
   13870 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13871 	reg &= ~(I218_ULP_CONFIG1_IND
   13872 	    | I218_ULP_CONFIG1_STICKY_ULP
   13873 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13874 	    | I218_ULP_CONFIG1_WOL_HOST
   13875 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13876 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13877 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13878 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13879 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13880 	reg |= I218_ULP_CONFIG1_START;
   13881 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13882 
   13883 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13884 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13885 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13886 
   13887 	/* Release semaphore */
   13888 	sc->phy.release(sc);
   13889 	wm_gmii_reset(sc);
   13890 	delay(50 * 1000);
   13891 }
   13892 
   13893 /* WOL in the newer chipset interfaces (pchlan) */
   13894 static void
   13895 wm_enable_phy_wakeup(struct wm_softc *sc)
   13896 {
   13897 #if 0
   13898 	uint16_t preg;
   13899 
   13900 	/* Copy MAC RARs to PHY RARs */
   13901 
   13902 	/* Copy MAC MTA to PHY MTA */
   13903 
   13904 	/* Configure PHY Rx Control register */
   13905 
   13906 	/* Enable PHY wakeup in MAC register */
   13907 
   13908 	/* Configure and enable PHY wakeup in PHY registers */
   13909 
   13910 	/* Activate PHY wakeup */
   13911 
   13912 	/* XXX */
   13913 #endif
   13914 }
   13915 
   13916 /* Power down workaround on D3 */
   13917 static void
   13918 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13919 {
   13920 	uint32_t reg;
   13921 	int i;
   13922 
   13923 	for (i = 0; i < 2; i++) {
   13924 		/* Disable link */
   13925 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13926 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13927 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13928 
   13929 		/*
   13930 		 * Call gig speed drop workaround on Gig disable before
   13931 		 * accessing any PHY registers
   13932 		 */
   13933 		if (sc->sc_type == WM_T_ICH8)
   13934 			wm_gig_downshift_workaround_ich8lan(sc);
   13935 
   13936 		/* Write VR power-down enable */
   13937 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13938 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13939 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13940 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13941 
   13942 		/* Read it back and test */
   13943 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13944 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13945 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13946 			break;
   13947 
   13948 		/* Issue PHY reset and repeat at most one more time */
   13949 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13950 	}
   13951 }
   13952 
   13953 static void
   13954 wm_enable_wakeup(struct wm_softc *sc)
   13955 {
   13956 	uint32_t reg, pmreg;
   13957 	pcireg_t pmode;
   13958 
   13959 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13960 		device_xname(sc->sc_dev), __func__));
   13961 
   13962 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13963 		&pmreg, NULL) == 0)
   13964 		return;
   13965 
   13966 	/* Advertise the wakeup capability */
   13967 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13968 	    | CTRL_SWDPIN(3));
   13969 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13970 
   13971 	/* ICH workaround */
   13972 	switch (sc->sc_type) {
   13973 	case WM_T_ICH8:
   13974 	case WM_T_ICH9:
   13975 	case WM_T_ICH10:
   13976 	case WM_T_PCH:
   13977 	case WM_T_PCH2:
   13978 	case WM_T_PCH_LPT:
   13979 	case WM_T_PCH_SPT:
   13980 	case WM_T_PCH_CNP:
   13981 		/* Disable gig during WOL */
   13982 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13983 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13984 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13985 		if (sc->sc_type == WM_T_PCH)
   13986 			wm_gmii_reset(sc);
   13987 
   13988 		/* Power down workaround */
   13989 		if (sc->sc_phytype == WMPHY_82577) {
   13990 			struct mii_softc *child;
   13991 
   13992 			/* Assume that the PHY is copper */
   13993 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13994 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13995 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13996 				    (768 << 5) | 25, 0x0444); /* magic num */
   13997 		}
   13998 		break;
   13999 	default:
   14000 		break;
   14001 	}
   14002 
   14003 	/* Keep the laser running on fiber adapters */
   14004 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14005 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14006 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14007 		reg |= CTRL_EXT_SWDPIN(3);
   14008 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14009 	}
   14010 
   14011 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14012 #if 0	/* for the multicast packet */
   14013 	reg |= WUFC_MC;
   14014 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14015 #endif
   14016 
   14017 	if (sc->sc_type >= WM_T_PCH)
   14018 		wm_enable_phy_wakeup(sc);
   14019 	else {
   14020 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14021 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14022 	}
   14023 
   14024 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14025 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14026 		|| (sc->sc_type == WM_T_PCH2))
   14027 		    && (sc->sc_phytype == WMPHY_IGP_3))
   14028 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14029 
   14030 	/* Request PME */
   14031 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14032 #if 0
   14033 	/* Disable WOL */
   14034 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14035 #else
   14036 	/* For WOL */
   14037 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14038 #endif
   14039 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14040 }
   14041 
   14042 /* Disable ASPM L0s and/or L1 for workaround */
   14043 static void
   14044 wm_disable_aspm(struct wm_softc *sc)
   14045 {
   14046 	pcireg_t reg, mask = 0;
    14047 	const char *str = "";
   14048 
    14049 	/*
    14050 	 * Only for PCIe devices which have the PCIe capability in their
    14051 	 * PCI config space.
    14052 	 */
   14053 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14054 		return;
   14055 
   14056 	switch (sc->sc_type) {
   14057 	case WM_T_82571:
   14058 	case WM_T_82572:
   14059 		/*
   14060 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14061 		 * State Power management L1 State (ASPM L1).
   14062 		 */
   14063 		mask = PCIE_LCSR_ASPM_L1;
   14064 		str = "L1 is";
   14065 		break;
   14066 	case WM_T_82573:
   14067 	case WM_T_82574:
   14068 	case WM_T_82583:
   14069 		/*
   14070 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14071 		 *
    14072 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14073 		 * some chipsets.  The documents for the 82574 and 82583 say
    14074 		 * that disabling L0s with those specific chipsets is
    14075 		 * sufficient, but we follow what the Intel em driver does.
   14076 		 *
   14077 		 * References:
   14078 		 * Errata 8 of the Specification Update of i82573.
   14079 		 * Errata 20 of the Specification Update of i82574.
   14080 		 * Errata 9 of the Specification Update of i82583.
   14081 		 */
   14082 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14083 		str = "L0s and L1 are";
   14084 		break;
   14085 	default:
   14086 		return;
   14087 	}
   14088 
   14089 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14090 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14091 	reg &= ~mask;
   14092 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14093 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14094 
   14095 	/* Print only in wm_attach() */
   14096 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14097 		aprint_verbose_dev(sc->sc_dev,
   14098 		    "ASPM %s disabled to workaround the errata.\n",
   14099 			str);
   14100 }
   14101 
    14102 /* LPLU (Low Power Link Up) */
   14103 
   14104 static void
   14105 wm_lplu_d0_disable(struct wm_softc *sc)
   14106 {
   14107 	struct mii_data *mii = &sc->sc_mii;
   14108 	uint32_t reg;
   14109 
   14110 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14111 		device_xname(sc->sc_dev), __func__));
   14112 
   14113 	if (sc->sc_phytype == WMPHY_IFE)
   14114 		return;
   14115 
   14116 	switch (sc->sc_type) {
   14117 	case WM_T_82571:
   14118 	case WM_T_82572:
   14119 	case WM_T_82573:
   14120 	case WM_T_82575:
   14121 	case WM_T_82576:
   14122 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14123 		reg &= ~PMR_D0_LPLU;
   14124 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14125 		break;
   14126 	case WM_T_82580:
   14127 	case WM_T_I350:
   14128 	case WM_T_I210:
   14129 	case WM_T_I211:
   14130 		reg = CSR_READ(sc, WMREG_PHPM);
   14131 		reg &= ~PHPM_D0A_LPLU;
   14132 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14133 		break;
   14134 	case WM_T_82574:
   14135 	case WM_T_82583:
   14136 	case WM_T_ICH8:
   14137 	case WM_T_ICH9:
   14138 	case WM_T_ICH10:
   14139 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14140 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14141 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14142 		CSR_WRITE_FLUSH(sc);
   14143 		break;
   14144 	case WM_T_PCH:
   14145 	case WM_T_PCH2:
   14146 	case WM_T_PCH_LPT:
   14147 	case WM_T_PCH_SPT:
   14148 	case WM_T_PCH_CNP:
   14149 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14150 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14151 		if (wm_phy_resetisblocked(sc) == false)
   14152 			reg |= HV_OEM_BITS_ANEGNOW;
   14153 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14154 		break;
   14155 	default:
   14156 		break;
   14157 	}
   14158 }
   14159 
    14160 /* EEE (Energy Efficient Ethernet) */
   14161 
   14162 static void
   14163 wm_set_eee_i350(struct wm_softc *sc)
   14164 {
   14165 	uint32_t ipcnfg, eeer;
   14166 
   14167 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14168 	eeer = CSR_READ(sc, WMREG_EEER);
   14169 
   14170 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14171 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14172 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14173 		    | EEER_LPI_FC);
   14174 	} else {
   14175 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14176 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14177 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14178 		    | EEER_LPI_FC);
   14179 	}
   14180 
   14181 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14182 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14183 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14184 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14185 }
   14186 
   14187 /*
   14188  * Workarounds (mainly PHY related).
   14189  * Basically, PHY's workarounds are in the PHY drivers.
   14190  */
   14191 
   14192 /* Work-around for 82566 Kumeran PCS lock loss */
   14193 static void
   14194 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14195 {
   14196 	struct mii_data *mii = &sc->sc_mii;
   14197 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14198 	int i;
   14199 	int reg;
   14200 
   14201 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14202 		device_xname(sc->sc_dev), __func__));
   14203 
   14204 	/* If the link is not up, do nothing */
   14205 	if ((status & STATUS_LU) == 0)
   14206 		return;
   14207 
   14208 	/* Nothing to do if the link is other than 1Gbps */
   14209 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14210 		return;
   14211 
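	/*
	 * Check the PCS lock-loss diagnostic bit, resetting the PHY and
	 * retrying up to 10 times before giving up and disabling GigE.
	 */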
   14212 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14213 	for (i = 0; i < 10; i++) {
   14214 		/* read twice */
   14215 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14216 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14217 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14218 			goto out;	/* GOOD! */
   14219 
   14220 		/* Reset the PHY */
   14221 		wm_reset_phy(sc);
   14222 		delay(5*1000);
   14223 	}
   14224 
   14225 	/* Disable GigE link negotiation */
   14226 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14227 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14228 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14229 
   14230 	/*
   14231 	 * Call gig speed drop workaround on Gig disable before accessing
   14232 	 * any PHY registers.
   14233 	 */
   14234 	wm_gig_downshift_workaround_ich8lan(sc);
   14235 
   14236 out:
   14237 	return;
   14238 }
   14239 
   14240 /* WOL from S5 stops working */
   14241 static void
   14242 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14243 {
   14244 	uint16_t kmreg;
   14245 
   14246 	/* Only for igp3 */
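	/*
	 * Briefly set, then clear, the Kumeran near-end loopback bit;
	 * the same sequence Intel's e1000 driver uses in its gig
	 * downshift workaround.
	 */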
   14247 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14248 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14249 			return;
   14250 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14251 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14252 			return;
   14253 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14254 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14255 	}
   14256 }
   14257 
   14258 /*
   14259  * Workaround for pch's PHYs
   14260  * XXX should be moved to new PHY driver?
   14261  */
   14262 static void
   14263 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14264 {
   14265 
   14266 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14267 		device_xname(sc->sc_dev), __func__));
   14268 	KASSERT(sc->sc_type == WM_T_PCH);
   14269 
   14270 	if (sc->sc_phytype == WMPHY_82577)
   14271 		wm_set_mdio_slow_mode_hv(sc);
   14272 
   14273 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14274 
   14275 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14276 
   14277 	/* 82578 */
   14278 	if (sc->sc_phytype == WMPHY_82578) {
   14279 		struct mii_softc *child;
   14280 
   14281 		/*
   14282 		 * Return registers to default by doing a soft reset then
   14283 		 * writing 0x3140 to the control register
   14284 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14285 		 */
   14286 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14287 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14288 			PHY_RESET(child);
   14289 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14290 			    0x3140);
   14291 		}
   14292 	}
   14293 
   14294 	/* Select page 0 */
   14295 	sc->phy.acquire(sc);
   14296 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14297 	sc->phy.release(sc);
   14298 
   14299 	/*
   14300 	 * Configure the K1 Si workaround during phy reset assuming there is
   14301 	 * link so that it disables K1 if link is in 1Gbps.
   14302 	 */
   14303 	wm_k1_gig_workaround_hv(sc, 1);
   14304 }
   14305 
   14306 static void
   14307 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14308 {
   14309 
   14310 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14311 		device_xname(sc->sc_dev), __func__));
   14312 	KASSERT(sc->sc_type == WM_T_PCH2);
   14313 
   14314 	wm_set_mdio_slow_mode_hv(sc);
   14315 }
   14316 
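/*
 * K1 is a PHY power-saving state that cannot be used at 1Gbps, so
 * disable it while a gigabit link is up.  The IGP3_KMRN_DIAG writes
 * are link-stall fixes; this appears to mirror e1000's
 * e1000_k1_gig_workaround_hv().
 */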
   14317 static int
   14318 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14319 {
   14320 	int k1_enable = sc->sc_nvm_k1_enabled;
   14321 
   14322 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14323 		device_xname(sc->sc_dev), __func__));
   14324 
   14325 	if (sc->phy.acquire(sc) != 0)
   14326 		return -1;
   14327 
   14328 	if (link) {
   14329 		k1_enable = 0;
   14330 
   14331 		/* Link stall fix for link up */
   14332 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14333 		    0x0100);
   14334 	} else {
   14335 		/* Link stall fix for link down */
   14336 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14337 		    0x4100);
   14338 	}
   14339 
   14340 	wm_configure_k1_ich8lan(sc, k1_enable);
   14341 	sc->phy.release(sc);
   14342 
   14343 	return 0;
   14344 }
   14345 
   14346 static void
   14347 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14348 {
   14349 	uint32_t reg;
   14350 
   14351 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14352 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14353 	    reg | HV_KMRN_MDIO_SLOW);
   14354 }
   14355 
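/*
 * Write the K1 enable bit over Kumeran, then briefly force the MAC
 * speed (CTRL_FRCSPD with CTRL_EXT_SPD_BYPS) so that the new setting
 * takes effect; the same flow as e1000's e1000_configure_k1_ich8lan().
 */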
   14356 static void
   14357 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14358 {
   14359 	uint32_t ctrl, ctrl_ext, tmp;
   14360 	uint16_t kmreg;
   14361 	int rv;
   14362 
   14363 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14364 	if (rv != 0)
   14365 		return;
   14366 
   14367 	if (k1_enable)
   14368 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14369 	else
   14370 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14371 
   14372 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14373 	if (rv != 0)
   14374 		return;
   14375 
   14376 	delay(20);
   14377 
   14378 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14379 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14380 
   14381 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14382 	tmp |= CTRL_FRCSPD;
   14383 
   14384 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14385 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14386 	CSR_WRITE_FLUSH(sc);
   14387 	delay(20);
   14388 
   14389 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14390 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14391 	CSR_WRITE_FLUSH(sc);
   14392 	delay(20);
   14393 
   14394 	return;
   14395 }
   14396 
    14397 /* Special case - the 82575 needs manual init ... */
   14398 static void
   14399 wm_reset_init_script_82575(struct wm_softc *sc)
   14400 {
    14401 	/*
    14402 	 * Remark: this is untested code - we have no board without EEPROM.
    14403 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    14404 	 */
   14405 
   14406 	/* SerDes configuration via SERDESCTRL */
   14407 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14408 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14409 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14410 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14411 
   14412 	/* CCM configuration via CCMCTL register */
   14413 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14414 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14415 
   14416 	/* PCIe lanes configuration */
   14417 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14418 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14419 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14420 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14421 
   14422 	/* PCIe PLL Configuration */
   14423 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14424 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14425 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14426 }
   14427 
   14428 static void
   14429 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14430 {
   14431 	uint32_t reg;
   14432 	uint16_t nvmword;
   14433 	int rv;
   14434 
   14435 	if (sc->sc_type != WM_T_82580)
   14436 		return;
   14437 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14438 		return;
   14439 
   14440 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14441 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14442 	if (rv != 0) {
   14443 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14444 		    __func__);
   14445 		return;
   14446 	}
   14447 
   14448 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14449 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14450 		reg |= MDICNFG_DEST;
   14451 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14452 		reg |= MDICNFG_COM_MDIO;
   14453 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14454 }
   14455 
   14456 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14457 
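/*
 * Check whether the PHY answers on MDIO by reading its ID registers,
 * retrying once and (on pre-LPT PCH) falling back to slow MDIO mode.
 * Called with the PHY semaphore held.
 */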
   14458 static bool
   14459 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14460 {
   14461 	int i;
   14462 	uint32_t reg;
   14463 	uint16_t id1, id2;
   14464 
   14465 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14466 		device_xname(sc->sc_dev), __func__));
   14467 	id1 = id2 = 0xffff;
   14468 	for (i = 0; i < 2; i++) {
   14469 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14470 		if (MII_INVALIDID(id1))
   14471 			continue;
   14472 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14473 		if (MII_INVALIDID(id2))
   14474 			continue;
   14475 		break;
   14476 	}
   14477 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   14478 		goto out;
   14479 	}
   14480 
   14481 	if (sc->sc_type < WM_T_PCH_LPT) {
   14482 		sc->phy.release(sc);
   14483 		wm_set_mdio_slow_mode_hv(sc);
   14484 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14485 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14486 		sc->phy.acquire(sc);
   14487 	}
   14488 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14489 		printf("XXX return with false\n");
   14490 		return false;
   14491 	}
   14492 out:
   14493 	if (sc->sc_type >= WM_T_PCH_LPT) {
   14494 		/* Only unforce SMBus if ME is not active */
   14495 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14496 			/* Unforce SMBus mode in PHY */
   14497 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14498 			    CV_SMB_CTRL);
   14499 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14500 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14501 			    CV_SMB_CTRL, reg);
   14502 
   14503 			/* Unforce SMBus mode in MAC */
   14504 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14505 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14506 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14507 		}
   14508 	}
   14509 	return true;
   14510 }
   14511 
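/*
 * Toggle the LANPHYPC pin to power-cycle the PHY: program a 50ms PHY
 * config counter, drive LANPHYPC low for 1ms, then wait for the PHY to
 * come back (CTRL_EXT_LPCD on LPT and newer, a fixed 50ms otherwise).
 */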
   14512 static void
   14513 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14514 {
   14515 	uint32_t reg;
   14516 	int i;
   14517 
   14518 	/* Set PHY Config Counter to 50msec */
   14519 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14520 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14521 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14522 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14523 
   14524 	/* Toggle LANPHYPC */
   14525 	reg = CSR_READ(sc, WMREG_CTRL);
   14526 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14527 	reg &= ~CTRL_LANPHYPC_VALUE;
   14528 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14529 	CSR_WRITE_FLUSH(sc);
   14530 	delay(1000);
   14531 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14532 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14533 	CSR_WRITE_FLUSH(sc);
   14534 
   14535 	if (sc->sc_type < WM_T_PCH_LPT)
   14536 		delay(50 * 1000);
   14537 	else {
   14538 		i = 20;
   14539 
   14540 		do {
   14541 			delay(5 * 1000);
   14542 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14543 		    && i--);
   14544 
   14545 		delay(30 * 1000);
   14546 	}
   14547 }
   14548 
   14549 static int
   14550 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14551 {
   14552 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14553 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14554 	uint32_t rxa;
   14555 	uint16_t scale = 0, lat_enc = 0;
   14556 	int32_t obff_hwm = 0;
   14557 	int64_t lat_ns, value;
   14558 
   14559 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14560 		device_xname(sc->sc_dev), __func__));
   14561 
   14562 	if (link) {
   14563 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14564 		uint32_t status;
   14565 		uint16_t speed;
   14566 		pcireg_t preg;
   14567 
   14568 		status = CSR_READ(sc, WMREG_STATUS);
   14569 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14570 		case STATUS_SPEED_10:
   14571 			speed = 10;
   14572 			break;
   14573 		case STATUS_SPEED_100:
   14574 			speed = 100;
   14575 			break;
   14576 		case STATUS_SPEED_1000:
   14577 			speed = 1000;
   14578 			break;
   14579 		default:
   14580 			device_printf(sc->sc_dev, "Unknown speed "
   14581 			    "(status = %08x)\n", status);
   14582 			return -1;
   14583 		}
   14584 
   14585 		/* Rx Packet Buffer Allocation size (KB) */
   14586 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14587 
   14588 		/*
   14589 		 * Determine the maximum latency tolerated by the device.
   14590 		 *
   14591 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14592 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14593 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14594 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14595 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14596 		 */
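		/*
		 * Worked example with a made-up latency: lat_ns = 40000
		 * does not fit in 10 bits, so the loop below divides by
		 * 2^5 twice, giving scale = 2 and value = 40, which
		 * encodes a tolerance of 40 * 2^10 ns = 40960 ns.
		 */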
   14597 		lat_ns = ((int64_t)rxa * 1024 -
   14598 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14599 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14600 		if (lat_ns < 0)
   14601 			lat_ns = 0;
   14602 		else
   14603 			lat_ns /= speed;
   14604 		value = lat_ns;
   14605 
   14606 		while (value > LTRV_VALUE) {
    14607 			scale++;
   14608 			value = howmany(value, __BIT(5));
   14609 		}
   14610 		if (scale > LTRV_SCALE_MAX) {
   14611 			printf("%s: Invalid LTR latency scale %d\n",
   14612 			    device_xname(sc->sc_dev), scale);
   14613 			return -1;
   14614 		}
   14615 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14616 
   14617 		/* Determine the maximum latency tolerated by the platform */
   14618 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14619 		    WM_PCI_LTR_CAP_LPT);
   14620 		max_snoop = preg & 0xffff;
   14621 		max_nosnoop = preg >> 16;
   14622 
   14623 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14624 
   14625 		if (lat_enc > max_ltr_enc) {
   14626 			lat_enc = max_ltr_enc;
   14627 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14628 			    * PCI_LTR_SCALETONS(
   14629 				    __SHIFTOUT(lat_enc,
   14630 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14631 		}
   14632 
   14633 		if (lat_ns) {
   14634 			lat_ns *= speed * 1000;
   14635 			lat_ns /= 8;
   14636 			lat_ns /= 1000000000;
   14637 			obff_hwm = (int32_t)(rxa - lat_ns);
   14638 		}
   14639 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   14640 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    14641 			    " (rxa = %d, lat_ns = %d)\n",
   14642 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14643 			return -1;
   14644 		}
   14645 	}
   14646 	/* Snoop and No-Snoop latencies the same */
   14647 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14648 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14649 
   14650 	/* Set OBFF high water mark */
   14651 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14652 	reg |= obff_hwm;
   14653 	CSR_WRITE(sc, WMREG_SVT, reg);
   14654 
   14655 	/* Enable OBFF */
   14656 	reg = CSR_READ(sc, WMREG_SVCR);
   14657 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14658 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14659 
   14660 	return 0;
   14661 }
   14662 
   14663 /*
   14664  * I210 Errata 25 and I211 Errata 10
   14665  * Slow System Clock.
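 * If the internal PHY's PLL failed to configure (GS40G_PHY_PLL_UNCONF),
 * reset the PHY and bounce the device through D3hot/D0 with a patched
 * iNVM autoload word, retrying up to WM_MAX_PLL_TRIES times.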
   14666  */
   14667 static void
   14668 wm_pll_workaround_i210(struct wm_softc *sc)
   14669 {
   14670 	uint32_t mdicnfg, wuc;
   14671 	uint32_t reg;
   14672 	pcireg_t pcireg;
   14673 	uint32_t pmreg;
   14674 	uint16_t nvmword, tmp_nvmword;
   14675 	int phyval;
   14676 	bool wa_done = false;
   14677 	int i;
   14678 
   14679 	/* Save WUC and MDICNFG registers */
   14680 	wuc = CSR_READ(sc, WMREG_WUC);
   14681 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14682 
   14683 	reg = mdicnfg & ~MDICNFG_DEST;
   14684 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14685 
   14686 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14687 		nvmword = INVM_DEFAULT_AL;
   14688 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14689 
   14690 	/* Get Power Management cap offset */
   14691 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14692 		&pmreg, NULL) == 0)
   14693 		return;
   14694 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14695 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14696 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14697 
   14698 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14699 			break; /* OK */
   14700 		}
   14701 
   14702 		wa_done = true;
   14703 		/* Directly reset the internal PHY */
   14704 		reg = CSR_READ(sc, WMREG_CTRL);
   14705 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14706 
   14707 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14708 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14709 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14710 
   14711 		CSR_WRITE(sc, WMREG_WUC, 0);
   14712 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14713 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14714 
   14715 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14716 		    pmreg + PCI_PMCSR);
   14717 		pcireg |= PCI_PMCSR_STATE_D3;
   14718 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14719 		    pmreg + PCI_PMCSR, pcireg);
   14720 		delay(1000);
   14721 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14722 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14723 		    pmreg + PCI_PMCSR, pcireg);
   14724 
   14725 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14726 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14727 
   14728 		/* Restore WUC register */
   14729 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14730 	}
   14731 
   14732 	/* Restore MDICNFG setting */
   14733 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14734 	if (wa_done)
   14735 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14736 }
   14737 
   14738 static void
   14739 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14740 {
   14741 	uint32_t reg;
   14742 
   14743 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14744 		device_xname(sc->sc_dev), __func__));
   14745 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14746 
   14747 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14748 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14749 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14750 
   14751 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14752 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14753 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14754 }
   14755