      1 /*	$NetBSD: if_wm.c,v 1.573 2018/04/16 08:31:06 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.573 2018/04/16 08:31:06 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    161 #else
    162 #define	DPRINTF(x, y)	/* nothing */
    163 #endif /* WM_DEBUG */
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
    173  * Maximum number of interrupts this driver can use.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544.  We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
    202  * situations with jumbo frames.
    203  */
    204 #define	WM_NTXSEGS		256
    205 #define	WM_IFQUEUELEN		256
    206 #define	WM_TXQUEUELEN_MAX	64
    207 #define	WM_TXQUEUELEN_MAX_82547	16
    208 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    209 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    210 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    211 #define	WM_NTXDESC_82542	256
    212 #define	WM_NTXDESC_82544	4096
    213 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    214 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    215 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    216 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    217 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
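/*
 * Worked example (using the defaults above): with WM_NTXDESC(txq) ==
 * 4096 the mask is 0xfff, so the "next" macros wrap without a divide:
 *
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 0xfff == 0
 *
 * This is why txq_num and txq_ndesc must both be powers of two.
 */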
    218 
    219 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    220 
    221 #define	WM_TXINTERQSIZE		256
    222 
    223 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    224 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    225 #endif
    226 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    227 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    228 #endif
    229 
    230 /*
    231  * Receive descriptor list size.  We have one Rx buffer for normal
    232  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    233  * packet.  We allocate 256 receive descriptors, each with a 2k
    234  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    235  */
    236 #define	WM_NRXDESC		256
    237 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    238 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    239 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    240 
    241 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    242 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    243 #endif
    244 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    245 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    246 #endif
    247 
    248 typedef union txdescs {
    249 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    250 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    251 } txdescs_t;
    252 
    253 typedef union rxdescs {
    254 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    255 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    256 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    257 } rxdescs_t;
    258 
    259 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    260 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    261 
    262 /*
    263  * Software state for transmit jobs.
    264  */
    265 struct wm_txsoft {
    266 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    267 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    268 	int txs_firstdesc;		/* first descriptor in packet */
    269 	int txs_lastdesc;		/* last descriptor in packet */
    270 	int txs_ndesc;			/* # of descriptors used */
    271 };
    272 
    273 /*
    274  * Software state for receive buffers.  Each descriptor gets a
    275  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    276  * more than one buffer, we chain them together.
    277  */
    278 struct wm_rxsoft {
    279 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    280 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    281 };
    282 
    283 #define WM_LINKUP_TIMEOUT	50
    284 
    285 static uint16_t swfwphysem[] = {
    286 	SWFW_PHY0_SM,
    287 	SWFW_PHY1_SM,
    288 	SWFW_PHY2_SM,
    289 	SWFW_PHY3_SM
    290 };
    291 
    292 static const uint32_t wm_82580_rxpbs_table[] = {
    293 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    294 };
    295 
    296 struct wm_softc;
    297 
    298 #ifdef WM_EVENT_COUNTERS
    299 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    300 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    301 	struct evcnt qname##_ev_##evname;
    302 
    303 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    304 	do {								\
    305 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    306 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    307 		    "%s%02d%s", #qname, (qnum), #evname);		\
    308 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    309 		    (evtype), NULL, (xname),				\
    310 		    (q)->qname##_##evname##_evcnt_name);		\
    311 	} while (0)
    312 
    313 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    314 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    315 
    316 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    317 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    318 
    319 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    320 	evcnt_detach(&(q)->qname##_ev_##evname);
    321 #endif /* WM_EVENT_COUNTERS */
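/*
 * Expansion sketch (illustrative): WM_Q_EVCNT_DEFINE(txq, txdw) inside
 * struct wm_txqueue below produces
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * Macro parameters are not expanded inside the string literal, so the
 * literal serves only as an upper bound on the buffer size; the real
 * name ("txq00txdw" for queue 0) is formatted into the buffer by
 * WM_Q_EVCNT_ATTACH() before evcnt_attach_dynamic() registers it.
 */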
    322 
    323 struct wm_txqueue {
    324 	kmutex_t *txq_lock;		/* lock for tx operations */
    325 
    326 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    327 
    328 	/* Software state for the transmit descriptors. */
    329 	int txq_num;			/* must be a power of two */
    330 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    331 
    332 	/* TX control data structures. */
    333 	int txq_ndesc;			/* must be a power of two */
    334 	size_t txq_descsize;		/* size of a Tx descriptor */
    335 	txdescs_t *txq_descs_u;
    336 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    337 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    338 	int txq_desc_rseg;		/* real number of control segments */
    339 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    340 #define	txq_descs	txq_descs_u->sctxu_txdescs
    341 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    342 
    343 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    344 
    345 	int txq_free;			/* number of free Tx descriptors */
    346 	int txq_next;			/* next ready Tx descriptor */
    347 
    348 	int txq_sfree;			/* number of free Tx jobs */
    349 	int txq_snext;			/* next free Tx job */
    350 	int txq_sdirty;			/* dirty Tx jobs */
    351 
    352 	/* These 4 variables are used only on the 82547. */
    353 	int txq_fifo_size;		/* Tx FIFO size */
    354 	int txq_fifo_head;		/* current head of FIFO */
    355 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    356 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    357 
    358 	/*
    359 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    360 	 * CPUs. This intermediate queue mediates between them without blocking.
    361 	 */
    362 	pcq_t *txq_interq;
    363 
    364 	/*
    365 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    366 	 * to manage the Tx H/W queue's busy flag.
    367 	 */
    368 	int txq_flags;			/* flags for H/W queue, see below */
    369 #define	WM_TXQ_NO_SPACE	0x1
    370 
    371 	bool txq_stopping;
    372 
    373 	bool txq_watchdog;
    374 	time_t txq_lastsent;
    375 
    376 	uint32_t txq_packets;		/* for AIM */
    377 	uint32_t txq_bytes;		/* for AIM */
    378 #ifdef WM_EVENT_COUNTERS
    379 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    380 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    381 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    382 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    383 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    384 						/* XXX not used? */
    385 
    386 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    387 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    388 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    389 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    390 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    391 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    392 
    393 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    394 
    395 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    396 
    397 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    398 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    399 #endif /* WM_EVENT_COUNTERS */
    400 };
    401 
    402 struct wm_rxqueue {
    403 	kmutex_t *rxq_lock;		/* lock for rx operations */
    404 
    405 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    406 
    407 	/* Software state for the receive descriptors. */
    408 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    409 
    410 	/* RX control data structures. */
    411 	int rxq_ndesc;			/* must be a power of two */
    412 	size_t rxq_descsize;		/* size of an Rx descriptor */
    413 	rxdescs_t *rxq_descs_u;
    414 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    415 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    416 	int rxq_desc_rseg;		/* real number of control segments */
    417 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    418 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    419 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    420 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    421 
    422 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    423 
    424 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    425 	int rxq_discard;
    426 	int rxq_len;
    427 	struct mbuf *rxq_head;
    428 	struct mbuf *rxq_tail;
    429 	struct mbuf **rxq_tailp;
    430 
    431 	bool rxq_stopping;
    432 
    433 	uint32_t rxq_packets;		/* for AIM */
    434 	uint32_t rxq_bytes;		/* for AIM */
    435 #ifdef WM_EVENT_COUNTERS
    436 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    437 	WM_Q_EVCNT_DEFINE(rxq, rxdefer);	/* Rx deferred processing */
    438 
    439 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    440 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    441 #endif
    442 };
    443 
    444 struct wm_queue {
    445 	int wmq_id;			/* index of TX/RX queues */
    446 	int wmq_intr_idx;		/* index into the MSI-X table */
    447 
    448 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    449 	bool wmq_set_itr;
    450 
    451 	struct wm_txqueue wmq_txq;
    452 	struct wm_rxqueue wmq_rxq;
    453 
    454 	void *wmq_si;
    455 };
    456 
    457 struct wm_phyop {
    458 	int (*acquire)(struct wm_softc *);
    459 	void (*release)(struct wm_softc *);
    460 	int reset_delay_us;
    461 };
    462 
    463 struct wm_nvmop {
    464 	int (*acquire)(struct wm_softc *);
    465 	void (*release)(struct wm_softc *);
    466 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    467 };
    468 
    469 /*
    470  * Software state per device.
    471  */
    472 struct wm_softc {
    473 	device_t sc_dev;		/* generic device information */
    474 	bus_space_tag_t sc_st;		/* bus space tag */
    475 	bus_space_handle_t sc_sh;	/* bus space handle */
    476 	bus_size_t sc_ss;		/* bus space size */
    477 	bus_space_tag_t sc_iot;		/* I/O space tag */
    478 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    479 	bus_size_t sc_ios;		/* I/O space size */
    480 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    481 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    482 	bus_size_t sc_flashs;		/* flash registers space size */
    483 	off_t sc_flashreg_offset;	/*
    484 					 * offset to flash registers from
    485 					 * start of BAR
    486 					 */
    487 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    488 
    489 	struct ethercom sc_ethercom;	/* ethernet common data */
    490 	struct mii_data sc_mii;		/* MII/media information */
    491 
    492 	pci_chipset_tag_t sc_pc;
    493 	pcitag_t sc_pcitag;
    494 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    495 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    496 
    497 	uint16_t sc_pcidevid;		/* PCI device ID */
    498 	wm_chip_type sc_type;		/* MAC type */
    499 	int sc_rev;			/* MAC revision */
    500 	wm_phy_type sc_phytype;		/* PHY type */
    501 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    502 #define	WM_MEDIATYPE_UNKNOWN		0x00
    503 #define	WM_MEDIATYPE_FIBER		0x01
    504 #define	WM_MEDIATYPE_COPPER		0x02
    505 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    506 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    507 	int sc_flags;			/* flags; see below */
    508 	int sc_if_flags;		/* last if_flags */
    509 	int sc_flowflags;		/* 802.3x flow control flags */
    510 	int sc_align_tweak;
    511 
    512 	void *sc_ihs[WM_MAX_NINTR];	/*
    513 					 * interrupt cookie.
    514 					 * - legacy and msi use sc_ihs[0] only
    515 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    516 					 */
    517 	pci_intr_handle_t *sc_intrs;	/*
    518 					 * legacy and msi use sc_intrs[0] only
    519 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    520 					 */
    521 	int sc_nintrs;			/* number of interrupts */
    522 
    523 	int sc_link_intr_idx;		/* index into the MSI-X table */
    524 
    525 	callout_t sc_tick_ch;		/* tick callout */
    526 	bool sc_core_stopping;
    527 
    528 	int sc_nvm_ver_major;
    529 	int sc_nvm_ver_minor;
    530 	int sc_nvm_ver_build;
    531 	int sc_nvm_addrbits;		/* NVM address bits */
    532 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    533 	int sc_ich8_flash_base;
    534 	int sc_ich8_flash_bank_size;
    535 	int sc_nvm_k1_enabled;
    536 
    537 	int sc_nqueues;
    538 	struct wm_queue *sc_queue;
    539 	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
    540 	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
    541 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    542 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    543 
    544 	int sc_affinity_offset;
    545 
    546 #ifdef WM_EVENT_COUNTERS
    547 	/* Event counters. */
    548 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    549 
    550 	/* WM_T_82542_2_1 only */
    551 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    552 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    553 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    554 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    555 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    556 #endif /* WM_EVENT_COUNTERS */
    557 
    558 	/* This variable is used only on the 82547. */
    559 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    560 
    561 	uint32_t sc_ctrl;		/* prototype CTRL register */
    562 #if 0
    563 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    564 #endif
    565 	uint32_t sc_icr;		/* prototype interrupt bits */
    566 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    567 	uint32_t sc_tctl;		/* prototype TCTL register */
    568 	uint32_t sc_rctl;		/* prototype RCTL register */
    569 	uint32_t sc_txcw;		/* prototype TXCW register */
    570 	uint32_t sc_tipg;		/* prototype TIPG register */
    571 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    572 	uint32_t sc_pba;		/* prototype PBA register */
    573 
    574 	int sc_tbi_linkup;		/* TBI link status */
    575 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    576 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    577 
    578 	int sc_mchash_type;		/* multicast filter offset */
    579 
    580 	krndsource_t rnd_source;	/* random source */
    581 
    582 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    583 
    584 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    585 	kmutex_t *sc_ich_phymtx;	/*
    586 					 * 82574/82583/ICH/PCH specific PHY
    587 					 * mutex. For 82574/82583, the mutex
    588 					 * is used for both PHY and NVM.
    589 					 */
    590 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    591 
    592 	struct wm_phyop phy;
    593 	struct wm_nvmop nvm;
    594 };
    595 
    596 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    597 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    598 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
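/*
 * Locking sketch (assumed pattern, mirroring the *_locked function
 * pairs declared below): public entry points take the core lock and
 * call their _locked counterparts, e.g.
 */
#if 0
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	int ret;

	WM_CORE_LOCK(sc);
	ret = wm_init_locked(ifp);
	WM_CORE_UNLOCK(sc);

	return ret;
}
#endif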
    599 
    600 #define	WM_RXCHAIN_RESET(rxq)						\
    601 do {									\
    602 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    603 	*(rxq)->rxq_tailp = NULL;					\
    604 	(rxq)->rxq_len = 0;						\
    605 } while (/*CONSTCOND*/0)
    606 
    607 #define	WM_RXCHAIN_LINK(rxq, m)						\
    608 do {									\
    609 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    610 	(rxq)->rxq_tailp = &(m)->m_next;				\
    611 } while (/*CONSTCOND*/0)
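/*
 * Usage sketch (assumed, based on the Rx path declared below): one
 * mbuf is linked per filled descriptor, and the chain is reset once a
 * whole packet has been assembled and handed up.  "lastseg" is an
 * illustrative flag, not a variable in this file:
 */
#if 0
	WM_RXCHAIN_LINK(rxq, m);		/* append this buffer */
	if (lastseg) {
		struct mbuf *head = rxq->rxq_head;

		WM_RXCHAIN_RESET(rxq);		/* start a new chain */
		/* ... pass "head" to the network stack ... */
	}
#endif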
    612 
    613 #ifdef WM_EVENT_COUNTERS
    614 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    615 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    616 
    617 #define WM_Q_EVCNT_INCR(qname, evname)			\
    618 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    619 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    620 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    621 #else /* !WM_EVENT_COUNTERS */
    622 #define	WM_EVCNT_INCR(ev)	/* nothing */
    623 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    624 
    625 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    626 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    627 #endif /* !WM_EVENT_COUNTERS */
    628 
    629 #define	CSR_READ(sc, reg)						\
    630 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    631 #define	CSR_WRITE(sc, reg, val)						\
    632 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    633 #define	CSR_WRITE_FLUSH(sc)						\
    634 	(void) CSR_READ((sc), WMREG_STATUS)
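/*
 * CSR_WRITE_FLUSH() forces posted PCI writes to complete: a read from
 * any register (STATUS here) cannot pass the write that precedes it.
 * Illustrative pattern only:
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);	/* make sure the reset reached the chip */
	delay(10000);
#endif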
    635 
    636 #define ICH8_FLASH_READ32(sc, reg)					\
    637 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    638 	    (reg) + sc->sc_flashreg_offset)
    639 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    640 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    641 	    (reg) + sc->sc_flashreg_offset, (data))
    642 
    643 #define ICH8_FLASH_READ16(sc, reg)					\
    644 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    645 	    (reg) + sc->sc_flashreg_offset)
    646 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    647 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    648 	    (reg) + sc->sc_flashreg_offset, (data))
    649 
    650 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    651 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    652 
    653 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    654 #define	WM_CDTXADDR_HI(txq, x)						\
    655 	(sizeof(bus_addr_t) == 8 ?					\
    656 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    657 
    658 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    659 #define	WM_CDRXADDR_HI(rxq, x)						\
    660 	(sizeof(bus_addr_t) == 8 ?					\
    661 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
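/*
 * Illustrative: splitting the DMA address of Tx descriptor 0 for a
 * 32-bit base-address register pair (the actual register names vary by
 * MAC generation, so none are assumed here).  On systems where
 * bus_addr_t is 32 bits wide the high half is constant 0:
 *
 *	lo = WM_CDTXADDR_LO(txq, 0);	// low 32 bits of the ring base
 *	hi = WM_CDTXADDR_HI(txq, 0);	// upper 32 bits, or 0
 */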
    662 
    663 /*
    664  * Register read/write functions.
    665  * Other than CSR_{READ|WRITE}().
    666  */
    667 #if 0
    668 static inline uint32_t wm_io_read(struct wm_softc *, int);
    669 #endif
    670 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    671 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    672 	uint32_t, uint32_t);
    673 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    674 
    675 /*
    676  * Descriptor sync/init functions.
    677  */
    678 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    679 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    680 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    681 
    682 /*
    683  * Device driver interface functions and commonly used functions.
    684  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    685  */
    686 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    687 static int	wm_match(device_t, cfdata_t, void *);
    688 static void	wm_attach(device_t, device_t, void *);
    689 static int	wm_detach(device_t, int);
    690 static bool	wm_suspend(device_t, const pmf_qual_t *);
    691 static bool	wm_resume(device_t, const pmf_qual_t *);
    692 static void	wm_watchdog(struct ifnet *);
    693 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    694     uint16_t *);
    695 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    696     uint16_t *);
    697 static void	wm_tick(void *);
    698 static int	wm_ifflags_cb(struct ethercom *);
    699 static int	wm_ioctl(struct ifnet *, u_long, void *);
    700 /* MAC address related */
    701 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    702 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    703 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    704 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    705 static void	wm_set_filter(struct wm_softc *);
    706 /* Reset and init related */
    707 static void	wm_set_vlan(struct wm_softc *);
    708 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    709 static void	wm_get_auto_rd_done(struct wm_softc *);
    710 static void	wm_lan_init_done(struct wm_softc *);
    711 static void	wm_get_cfg_done(struct wm_softc *);
    712 static void	wm_phy_post_reset(struct wm_softc *);
    713 static void	wm_write_smbus_addr(struct wm_softc *);
    714 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    715 static void	wm_initialize_hardware_bits(struct wm_softc *);
    716 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    717 static void	wm_reset_phy(struct wm_softc *);
    718 static void	wm_flush_desc_rings(struct wm_softc *);
    719 static void	wm_reset(struct wm_softc *);
    720 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    721 static void	wm_rxdrain(struct wm_rxqueue *);
    722 static void	wm_init_rss(struct wm_softc *);
    723 static void	wm_adjust_qnum(struct wm_softc *, int);
    724 static inline bool	wm_is_using_msix(struct wm_softc *);
    725 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    726 static int	wm_softint_establish(struct wm_softc *, int, int);
    727 static int	wm_setup_legacy(struct wm_softc *);
    728 static int	wm_setup_msix(struct wm_softc *);
    729 static int	wm_init(struct ifnet *);
    730 static int	wm_init_locked(struct ifnet *);
    731 static void	wm_unset_stopping_flags(struct wm_softc *);
    732 static void	wm_set_stopping_flags(struct wm_softc *);
    733 static void	wm_stop(struct ifnet *, int);
    734 static void	wm_stop_locked(struct ifnet *, int);
    735 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    736 static void	wm_82547_txfifo_stall(void *);
    737 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    738 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    739 /* DMA related */
    740 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    741 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    742 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    743 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    744     struct wm_txqueue *);
    745 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    746 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    747 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    748     struct wm_rxqueue *);
    749 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    750 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    751 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    752 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    753 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    754 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    755 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    756     struct wm_txqueue *);
    757 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    758     struct wm_rxqueue *);
    759 static int	wm_alloc_txrx_queues(struct wm_softc *);
    760 static void	wm_free_txrx_queues(struct wm_softc *);
    761 static int	wm_init_txrx_queues(struct wm_softc *);
    762 /* Start */
    763 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    764     struct wm_txsoft *, uint32_t *, uint8_t *);
    765 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    766 static void	wm_start(struct ifnet *);
    767 static void	wm_start_locked(struct ifnet *);
    768 static int	wm_transmit(struct ifnet *, struct mbuf *);
    769 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    770 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    771     bool);
    772 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    773     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    774 static void	wm_nq_start(struct ifnet *);
    775 static void	wm_nq_start_locked(struct ifnet *);
    776 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    777 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    778 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    779     bool);
    780 static void	wm_deferred_start_locked(struct wm_txqueue *);
    781 static void	wm_handle_queue(void *);
    782 /* Interrupt */
    783 static bool	wm_txeof(struct wm_txqueue *, u_int);
    784 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    785 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    786 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    787 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    788 static void	wm_linkintr(struct wm_softc *, uint32_t);
    789 static int	wm_intr_legacy(void *);
    790 static inline void	wm_txrxintr_disable(struct wm_queue *);
    791 static inline void	wm_txrxintr_enable(struct wm_queue *);
    792 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    793 static int	wm_txrxintr_msix(void *);
    794 static int	wm_linkintr_msix(void *);
    795 
    796 /*
    797  * Media related.
    798  * GMII, SGMII, TBI, SERDES and SFP.
    799  */
    800 /* Common */
    801 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    802 /* GMII related */
    803 static void	wm_gmii_reset(struct wm_softc *);
    804 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    805 static int	wm_get_phy_id_82575(struct wm_softc *);
    806 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    807 static int	wm_gmii_mediachange(struct ifnet *);
    808 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    809 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    810 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    811 static int	wm_gmii_i82543_readreg(device_t, int, int);
    812 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    813 static int	wm_gmii_mdic_readreg(device_t, int, int);
    814 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    815 static int	wm_gmii_i82544_readreg(device_t, int, int);
    816 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    817 static int	wm_gmii_i80003_readreg(device_t, int, int);
    818 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    819 static int	wm_gmii_bm_readreg(device_t, int, int);
    820 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    821 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    822 static int	wm_gmii_hv_readreg(device_t, int, int);
    823 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    824 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    825 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    826 static int	wm_gmii_82580_readreg(device_t, int, int);
    827 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    828 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    829 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    830 static void	wm_gmii_statchg(struct ifnet *);
    831 /*
    832  * kumeran related (80003, ICH* and PCH*).
    833  * These functions are not for accessing MII registers but for accessing
    834  * kumeran specific registers.
    835  */
    836 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    837 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    838 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    839 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    840 /* SGMII */
    841 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    842 static int	wm_sgmii_readreg(device_t, int, int);
    843 static void	wm_sgmii_writereg(device_t, int, int, int);
    844 /* TBI related */
    845 static void	wm_tbi_mediainit(struct wm_softc *);
    846 static int	wm_tbi_mediachange(struct ifnet *);
    847 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    848 static int	wm_check_for_link(struct wm_softc *);
    849 static void	wm_tbi_tick(struct wm_softc *);
    850 /* SERDES related */
    851 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    852 static int	wm_serdes_mediachange(struct ifnet *);
    853 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    854 static void	wm_serdes_tick(struct wm_softc *);
    855 /* SFP related */
    856 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    857 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    858 
    859 /*
    860  * NVM related.
    861  * Microwire, SPI (w/wo EERD) and Flash.
    862  */
    863 /* Misc functions */
    864 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    865 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    866 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    867 /* Microwire */
    868 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    869 /* SPI */
    870 static int	wm_nvm_ready_spi(struct wm_softc *);
    871 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    872 /* Using with EERD */
    873 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    874 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    875 /* Flash */
    876 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    877     unsigned int *);
    878 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    879 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    880 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    881 	uint32_t *);
    882 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    883 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    884 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    885 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    886 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    887 /* iNVM */
    888 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    889 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    890 /* Lock, detecting NVM type, validate checksum and read */
    891 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    892 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    893 static int	wm_nvm_validate_checksum(struct wm_softc *);
    894 static void	wm_nvm_version_invm(struct wm_softc *);
    895 static void	wm_nvm_version(struct wm_softc *);
    896 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    897 
    898 /*
    899  * Hardware semaphores.
    900  * Very complex...
    901  */
    902 static int	wm_get_null(struct wm_softc *);
    903 static void	wm_put_null(struct wm_softc *);
    904 static int	wm_get_eecd(struct wm_softc *);
    905 static void	wm_put_eecd(struct wm_softc *);
    906 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    907 static void	wm_put_swsm_semaphore(struct wm_softc *);
    908 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    909 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    910 static int	wm_get_nvm_80003(struct wm_softc *);
    911 static void	wm_put_nvm_80003(struct wm_softc *);
    912 static int	wm_get_nvm_82571(struct wm_softc *);
    913 static void	wm_put_nvm_82571(struct wm_softc *);
    914 static int	wm_get_phy_82575(struct wm_softc *);
    915 static void	wm_put_phy_82575(struct wm_softc *);
    916 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    917 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    918 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    919 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    920 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    921 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    922 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    923 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    924 
    925 /*
    926  * Management mode and power management related subroutines.
    927  * BMC, AMT, suspend/resume and EEE.
    928  */
    929 #if 0
    930 static int	wm_check_mng_mode(struct wm_softc *);
    931 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    932 static int	wm_check_mng_mode_82574(struct wm_softc *);
    933 static int	wm_check_mng_mode_generic(struct wm_softc *);
    934 #endif
    935 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    936 static bool	wm_phy_resetisblocked(struct wm_softc *);
    937 static void	wm_get_hw_control(struct wm_softc *);
    938 static void	wm_release_hw_control(struct wm_softc *);
    939 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    940 static void	wm_smbustopci(struct wm_softc *);
    941 static void	wm_init_manageability(struct wm_softc *);
    942 static void	wm_release_manageability(struct wm_softc *);
    943 static void	wm_get_wakeup(struct wm_softc *);
    944 static void	wm_ulp_disable(struct wm_softc *);
    945 static void	wm_enable_phy_wakeup(struct wm_softc *);
    946 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    947 static void	wm_enable_wakeup(struct wm_softc *);
    948 static void	wm_disable_aspm(struct wm_softc *);
    949 /* LPLU (Low Power Link Up) */
    950 static void	wm_lplu_d0_disable(struct wm_softc *);
    951 /* EEE */
    952 static void	wm_set_eee_i350(struct wm_softc *);
    953 
    954 /*
    955  * Workarounds (mainly PHY related).
    956  * Basically, PHY's workarounds are in the PHY drivers.
    957  */
    958 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    959 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    960 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    961 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    962 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    963 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    964 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    965 static void	wm_reset_init_script_82575(struct wm_softc *);
    966 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    967 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    968 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    969 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    970 static void	wm_pll_workaround_i210(struct wm_softc *);
    971 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    972 
    973 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    974     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    975 
    976 /*
    977  * Devices supported by this driver.
    978  */
    979 static const struct wm_product {
    980 	pci_vendor_id_t		wmp_vendor;
    981 	pci_product_id_t	wmp_product;
    982 	const char		*wmp_name;
    983 	wm_chip_type		wmp_type;
    984 	uint32_t		wmp_flags;
    985 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    986 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    987 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    988 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    989 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    990 } wm_products[] = {
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    992 	  "Intel i82542 1000BASE-X Ethernet",
    993 	  WM_T_82542_2_1,	WMP_F_FIBER },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    996 	  "Intel i82543GC 1000BASE-X Ethernet",
    997 	  WM_T_82543,		WMP_F_FIBER },
    998 
    999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1000 	  "Intel i82543GC 1000BASE-T Ethernet",
   1001 	  WM_T_82543,		WMP_F_COPPER },
   1002 
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1004 	  "Intel i82544EI 1000BASE-T Ethernet",
   1005 	  WM_T_82544,		WMP_F_COPPER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1008 	  "Intel i82544EI 1000BASE-X Ethernet",
   1009 	  WM_T_82544,		WMP_F_FIBER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1012 	  "Intel i82544GC 1000BASE-T Ethernet",
   1013 	  WM_T_82544,		WMP_F_COPPER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1016 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1017 	  WM_T_82544,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1020 	  "Intel i82540EM 1000BASE-T Ethernet",
   1021 	  WM_T_82540,		WMP_F_COPPER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1024 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1025 	  WM_T_82540,		WMP_F_COPPER },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1028 	  "Intel i82540EP 1000BASE-T Ethernet",
   1029 	  WM_T_82540,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1032 	  "Intel i82540EP 1000BASE-T Ethernet",
   1033 	  WM_T_82540,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1036 	  "Intel i82540EP 1000BASE-T Ethernet",
   1037 	  WM_T_82540,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1040 	  "Intel i82545EM 1000BASE-T Ethernet",
   1041 	  WM_T_82545,		WMP_F_COPPER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1044 	  "Intel i82545GM 1000BASE-T Ethernet",
   1045 	  WM_T_82545_3,		WMP_F_COPPER },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1048 	  "Intel i82545GM 1000BASE-X Ethernet",
   1049 	  WM_T_82545_3,		WMP_F_FIBER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1052 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1053 	  WM_T_82545_3,		WMP_F_SERDES },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1056 	  "Intel i82546EB 1000BASE-T Ethernet",
   1057 	  WM_T_82546,		WMP_F_COPPER },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1060 	  "Intel i82546EB 1000BASE-T Ethernet",
   1061 	  WM_T_82546,		WMP_F_COPPER },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1064 	  "Intel i82545EM 1000BASE-X Ethernet",
   1065 	  WM_T_82545,		WMP_F_FIBER },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1068 	  "Intel i82546EB 1000BASE-X Ethernet",
   1069 	  WM_T_82546,		WMP_F_FIBER },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1072 	  "Intel i82546GB 1000BASE-T Ethernet",
   1073 	  WM_T_82546_3,		WMP_F_COPPER },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1076 	  "Intel i82546GB 1000BASE-X Ethernet",
   1077 	  WM_T_82546_3,		WMP_F_FIBER },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1080 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1081 	  WM_T_82546_3,		WMP_F_SERDES },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1084 	  "i82546GB quad-port Gigabit Ethernet",
   1085 	  WM_T_82546_3,		WMP_F_COPPER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1088 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1089 	  WM_T_82546_3,		WMP_F_COPPER },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1092 	  "Intel PRO/1000MT (82546GB)",
   1093 	  WM_T_82546_3,		WMP_F_COPPER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1096 	  "Intel i82541EI 1000BASE-T Ethernet",
   1097 	  WM_T_82541,		WMP_F_COPPER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1100 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1101 	  WM_T_82541,		WMP_F_COPPER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1104 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1105 	  WM_T_82541,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1108 	  "Intel i82541ER 1000BASE-T Ethernet",
   1109 	  WM_T_82541_2,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1112 	  "Intel i82541GI 1000BASE-T Ethernet",
   1113 	  WM_T_82541_2,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1116 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1117 	  WM_T_82541_2,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1120 	  "Intel i82541PI 1000BASE-T Ethernet",
   1121 	  WM_T_82541_2,		WMP_F_COPPER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1124 	  "Intel i82547EI 1000BASE-T Ethernet",
   1125 	  WM_T_82547,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1128 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1129 	  WM_T_82547,		WMP_F_COPPER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1132 	  "Intel i82547GI 1000BASE-T Ethernet",
   1133 	  WM_T_82547_2,		WMP_F_COPPER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1136 	  "Intel PRO/1000 PT (82571EB)",
   1137 	  WM_T_82571,		WMP_F_COPPER },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1140 	  "Intel PRO/1000 PF (82571EB)",
   1141 	  WM_T_82571,		WMP_F_FIBER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1144 	  "Intel PRO/1000 PB (82571EB)",
   1145 	  WM_T_82571,		WMP_F_SERDES },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1148 	  "Intel PRO/1000 QT (82571EB)",
   1149 	  WM_T_82571,		WMP_F_COPPER },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1152 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1153 	  WM_T_82571,		WMP_F_COPPER, },
   1154 
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1156 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1157 	  WM_T_82571,		WMP_F_COPPER, },
   1158 
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1160 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1161 	  WM_T_82571,		WMP_F_SERDES, },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1164 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1165 	  WM_T_82571,		WMP_F_SERDES, },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1168 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1169 	  WM_T_82571,		WMP_F_FIBER, },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1172 	  "Intel i82572EI 1000baseT Ethernet",
   1173 	  WM_T_82572,		WMP_F_COPPER },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1176 	  "Intel i82572EI 1000baseX Ethernet",
   1177 	  WM_T_82572,		WMP_F_FIBER },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1180 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1181 	  WM_T_82572,		WMP_F_SERDES },
   1182 
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1184 	  "Intel i82572EI 1000baseT Ethernet",
   1185 	  WM_T_82572,		WMP_F_COPPER },
   1186 
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1188 	  "Intel i82573E",
   1189 	  WM_T_82573,		WMP_F_COPPER },
   1190 
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1192 	  "Intel i82573E IAMT",
   1193 	  WM_T_82573,		WMP_F_COPPER },
   1194 
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1196 	  "Intel i82573L Gigabit Ethernet",
   1197 	  WM_T_82573,		WMP_F_COPPER },
   1198 
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1200 	  "Intel i82574L",
   1201 	  WM_T_82574,		WMP_F_COPPER },
   1202 
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1204 	  "Intel i82574L",
   1205 	  WM_T_82574,		WMP_F_COPPER },
   1206 
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1208 	  "Intel i82583V",
   1209 	  WM_T_82583,		WMP_F_COPPER },
   1210 
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1212 	  "i80003 dual 1000baseT Ethernet",
   1213 	  WM_T_80003,		WMP_F_COPPER },
   1214 
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1216 	  "i80003 dual 1000baseX Ethernet",
   1217 	  WM_T_80003,		WMP_F_COPPER },
   1218 
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1220 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1221 	  WM_T_80003,		WMP_F_SERDES },
   1222 
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1224 	  "Intel i80003 1000baseT Ethernet",
   1225 	  WM_T_80003,		WMP_F_COPPER },
   1226 
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1228 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1229 	  WM_T_80003,		WMP_F_SERDES },
   1230 
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1232 	  "Intel i82801H (M_AMT) LAN Controller",
   1233 	  WM_T_ICH8,		WMP_F_COPPER },
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1235 	  "Intel i82801H (AMT) LAN Controller",
   1236 	  WM_T_ICH8,		WMP_F_COPPER },
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1238 	  "Intel i82801H LAN Controller",
   1239 	  WM_T_ICH8,		WMP_F_COPPER },
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1241 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1242 	  WM_T_ICH8,		WMP_F_COPPER },
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1244 	  "Intel i82801H (M) LAN Controller",
   1245 	  WM_T_ICH8,		WMP_F_COPPER },
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1247 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1248 	  WM_T_ICH8,		WMP_F_COPPER },
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1250 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1251 	  WM_T_ICH8,		WMP_F_COPPER },
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1253 	  "82567V-3 LAN Controller",
   1254 	  WM_T_ICH8,		WMP_F_COPPER },
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1256 	  "82801I (AMT) LAN Controller",
   1257 	  WM_T_ICH9,		WMP_F_COPPER },
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1259 	  "82801I 10/100 LAN Controller",
   1260 	  WM_T_ICH9,		WMP_F_COPPER },
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1262 	  "82801I (G) 10/100 LAN Controller",
   1263 	  WM_T_ICH9,		WMP_F_COPPER },
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1265 	  "82801I (GT) 10/100 LAN Controller",
   1266 	  WM_T_ICH9,		WMP_F_COPPER },
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1268 	  "82801I (C) LAN Controller",
   1269 	  WM_T_ICH9,		WMP_F_COPPER },
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1271 	  "82801I mobile LAN Controller",
   1272 	  WM_T_ICH9,		WMP_F_COPPER },
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1274 	  "82801I mobile (V) LAN Controller",
   1275 	  WM_T_ICH9,		WMP_F_COPPER },
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1277 	  "82801I mobile (AMT) LAN Controller",
   1278 	  WM_T_ICH9,		WMP_F_COPPER },
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1280 	  "82567LM-4 LAN Controller",
   1281 	  WM_T_ICH9,		WMP_F_COPPER },
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1283 	  "82567LM-2 LAN Controller",
   1284 	  WM_T_ICH10,		WMP_F_COPPER },
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1286 	  "82567LF-2 LAN Controller",
   1287 	  WM_T_ICH10,		WMP_F_COPPER },
   1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1289 	  "82567LM-3 LAN Controller",
   1290 	  WM_T_ICH10,		WMP_F_COPPER },
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1292 	  "82567LF-3 LAN Controller",
   1293 	  WM_T_ICH10,		WMP_F_COPPER },
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1295 	  "82567V-2 LAN Controller",
   1296 	  WM_T_ICH10,		WMP_F_COPPER },
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1298 	  "82567V-3? LAN Controller",
   1299 	  WM_T_ICH10,		WMP_F_COPPER },
   1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1301 	  "HANKSVILLE LAN Controller",
   1302 	  WM_T_ICH10,		WMP_F_COPPER },
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1304 	  "PCH LAN (82577LM) Controller",
   1305 	  WM_T_PCH,		WMP_F_COPPER },
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1307 	  "PCH LAN (82577LC) Controller",
   1308 	  WM_T_PCH,		WMP_F_COPPER },
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1310 	  "PCH LAN (82578DM) Controller",
   1311 	  WM_T_PCH,		WMP_F_COPPER },
   1312 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1313 	  "PCH LAN (82578DC) Controller",
   1314 	  WM_T_PCH,		WMP_F_COPPER },
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1316 	  "PCH2 LAN (82579LM) Controller",
   1317 	  WM_T_PCH2,		WMP_F_COPPER },
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1319 	  "PCH2 LAN (82579V) Controller",
   1320 	  WM_T_PCH2,		WMP_F_COPPER },
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1322 	  "82575EB dual-1000baseT Ethernet",
   1323 	  WM_T_82575,		WMP_F_COPPER },
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1325 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1326 	  WM_T_82575,		WMP_F_SERDES },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1328 	  "82575GB quad-1000baseT Ethernet",
   1329 	  WM_T_82575,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1331 	  "82575GB quad-1000baseT Ethernet (PM)",
   1332 	  WM_T_82575,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1334 	  "82576 1000BaseT Ethernet",
   1335 	  WM_T_82576,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1337 	  "82576 1000BaseX Ethernet",
   1338 	  WM_T_82576,		WMP_F_FIBER },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1341 	  "82576 gigabit Ethernet (SERDES)",
   1342 	  WM_T_82576,		WMP_F_SERDES },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1345 	  "82576 quad-1000BaseT Ethernet",
   1346 	  WM_T_82576,		WMP_F_COPPER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1349 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1350 	  WM_T_82576,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1353 	  "82576 gigabit Ethernet",
   1354 	  WM_T_82576,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1357 	  "82576 gigabit Ethernet (SERDES)",
   1358 	  WM_T_82576,		WMP_F_SERDES },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1360 	  "82576 quad-gigabit Ethernet (SERDES)",
   1361 	  WM_T_82576,		WMP_F_SERDES },
   1362 
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1364 	  "82580 1000BaseT Ethernet",
   1365 	  WM_T_82580,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1367 	  "82580 1000BaseX Ethernet",
   1368 	  WM_T_82580,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1371 	  "82580 1000BaseT Ethernet (SERDES)",
   1372 	  WM_T_82580,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1375 	  "82580 gigabit Ethernet (SGMII)",
   1376 	  WM_T_82580,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1378 	  "82580 dual-1000BaseT Ethernet",
   1379 	  WM_T_82580,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1382 	  "82580 quad-1000BaseX Ethernet",
   1383 	  WM_T_82580,		WMP_F_FIBER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1386 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1387 	  WM_T_82580,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1390 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1391 	  WM_T_82580,		WMP_F_SERDES },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1394 	  "DH89XXCC 1000BASE-KX Ethernet",
   1395 	  WM_T_82580,		WMP_F_SERDES },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1398 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1399 	  WM_T_82580,		WMP_F_SERDES },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1402 	  "I350 Gigabit Network Connection",
   1403 	  WM_T_I350,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1406 	  "I350 Gigabit Fiber Network Connection",
   1407 	  WM_T_I350,		WMP_F_FIBER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1410 	  "I350 Gigabit Backplane Connection",
   1411 	  WM_T_I350,		WMP_F_SERDES },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1414 	  "I350 Quad Port Gigabit Ethernet",
   1415 	  WM_T_I350,		WMP_F_SERDES },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1418 	  "I350 Gigabit Connection",
   1419 	  WM_T_I350,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1422 	  "I354 Gigabit Ethernet (KX)",
   1423 	  WM_T_I354,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1426 	  "I354 Gigabit Ethernet (SGMII)",
   1427 	  WM_T_I354,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1430 	  "I354 Gigabit Ethernet (2.5G)",
   1431 	  WM_T_I354,		WMP_F_COPPER },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1434 	  "I210-T1 Ethernet Server Adapter",
   1435 	  WM_T_I210,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1438 	  "I210 Ethernet (Copper OEM)",
   1439 	  WM_T_I210,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1442 	  "I210 Ethernet (Copper IT)",
   1443 	  WM_T_I210,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1446 	  "I210 Ethernet (FLASH less)",
   1447 	  WM_T_I210,		WMP_F_COPPER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1450 	  "I210 Gigabit Ethernet (Fiber)",
   1451 	  WM_T_I210,		WMP_F_FIBER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1454 	  "I210 Gigabit Ethernet (SERDES)",
   1455 	  WM_T_I210,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1458 	  "I210 Gigabit Ethernet (FLASH less)",
   1459 	  WM_T_I210,		WMP_F_SERDES },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1462 	  "I210 Gigabit Ethernet (SGMII)",
   1463 	  WM_T_I210,		WMP_F_COPPER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1466 	  "I211 Ethernet (COPPER)",
   1467 	  WM_T_I211,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1469 	  "I217 V Ethernet Connection",
   1470 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1472 	  "I217 LM Ethernet Connection",
   1473 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1475 	  "I218 V Ethernet Connection",
   1476 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1478 	  "I218 V Ethernet Connection",
   1479 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1481 	  "I218 V Ethernet Connection",
   1482 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1484 	  "I218 LM Ethernet Connection",
   1485 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1487 	  "I218 LM Ethernet Connection",
   1488 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1490 	  "I218 LM Ethernet Connection",
   1491 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1493 	  "I219 V Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1496 	  "I219 V Ethernet Connection",
   1497 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1499 	  "I219 V Ethernet Connection",
   1500 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1502 	  "I219 V Ethernet Connection",
   1503 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1505 	  "I219 LM Ethernet Connection",
   1506 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1508 	  "I219 LM Ethernet Connection",
   1509 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1511 	  "I219 LM Ethernet Connection",
   1512 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1514 	  "I219 LM Ethernet Connection",
   1515 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1517 	  "I219 LM Ethernet Connection",
   1518 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1520 	  "I219 V Ethernet Connection",
   1521 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1523 	  "I219 V Ethernet Connection",
   1524 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1526 	  "I219 LM Ethernet Connection",
   1527 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1529 	  "I219 LM Ethernet Connection",
   1530 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1531 	{ 0,			0,
   1532 	  NULL,
   1533 	  0,			0 },
   1534 };
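
         /*
          * The all-zero entry above is the table's sentinel: wm_lookup()
          * walks the list until it finds a NULL wmp_name, so new entries
          * must be added before it.
          */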
   1535 
   1536 /*
   1537  * Register read/write functions.
   1538  * Other than CSR_{READ|WRITE}().
   1539  */
   1540 
   1541 #if 0 /* Not currently used */
   1542 static inline uint32_t
   1543 wm_io_read(struct wm_softc *sc, int reg)
   1544 {
   1545 
   1546 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1547 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1548 }
   1549 #endif
   1550 
   1551 static inline void
   1552 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1553 {
   1554 
   1555 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1556 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1557 }
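
         /*
          * The functions above use the chip's indirect I/O access pair:
          * a 32-bit write at offset 0 of the I/O BAR selects the CSR and
          * offset 4 carries the data.  A hypothetical call, for
          * illustration only:
          *
          *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
          */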
   1558 
   1559 static inline void
   1560 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1561     uint32_t data)
   1562 {
   1563 	uint32_t regval;
   1564 	int i;
   1565 
   1566 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1567 
   1568 	CSR_WRITE(sc, reg, regval);
   1569 
   1570 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1571 		delay(5);
   1572 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1573 			break;
   1574 	}
   1575 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1576 		aprint_error("%s: WARNING:"
   1577 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1578 		    device_xname(sc->sc_dev), reg);
   1579 	}
   1580 }
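
         /*
          * Note on the poll loop above: it re-reads the register every
          * 5us, so the worst-case wait before the warning fires is about
          * SCTL_CTL_POLL_TIMEOUT * 5 microseconds.
          */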
   1581 
   1582 static inline void
   1583 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1584 {
   1585 	wa->wa_low = htole32(v & 0xffffffffU);
   1586 	if (sizeof(bus_addr_t) == 8)
   1587 		wa->wa_high = htole32((uint64_t) v >> 32);
   1588 	else
   1589 		wa->wa_high = 0;
   1590 }
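
         /*
          * Illustrative example (values made up): with a 64-bit bus_addr_t,
          * v = 0x0000000123456780 stores wa_low = htole32(0x23456780) and
          * wa_high = htole32(0x00000001); with a 32-bit bus_addr_t, wa_high
          * is always 0.
          */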
   1591 
   1592 /*
   1593  * Descriptor sync/init functions.
   1594  */
   1595 static inline void
   1596 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1597 {
   1598 	struct wm_softc *sc = txq->txq_sc;
   1599 
   1600 	/* If it will wrap around, sync to the end of the ring. */
   1601 	if ((start + num) > WM_NTXDESC(txq)) {
   1602 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1603 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1604 		    (WM_NTXDESC(txq) - start), ops);
   1605 		num -= (WM_NTXDESC(txq) - start);
   1606 		start = 0;
   1607 	}
   1608 
   1609 	/* Now sync whatever is left. */
   1610 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1611 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1612 }
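
         /*
          * Worked example of the wrap-around case above (numbers are
          * illustrative): with a 256-descriptor ring, start = 250 and
          * num = 10, the first bus_dmamap_sync() covers descriptors
          * 250-255 and the second covers descriptors 0-3.
          */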
   1613 
   1614 static inline void
   1615 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1616 {
   1617 	struct wm_softc *sc = rxq->rxq_sc;
   1618 
   1619 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1620 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1621 }
   1622 
   1623 static inline void
   1624 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1625 {
   1626 	struct wm_softc *sc = rxq->rxq_sc;
   1627 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1628 	struct mbuf *m = rxs->rxs_mbuf;
   1629 
   1630 	/*
   1631 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1632 	 * so that the payload after the Ethernet header is aligned
   1633 	 * to a 4-byte boundary.
    1634 	 *
   1635 	 * XXX BRAINDAMAGE ALERT!
   1636 	 * The stupid chip uses the same size for every buffer, which
   1637 	 * is set in the Receive Control register.  We are using the 2K
   1638 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1639 	 * reason, we can't "scoot" packets longer than the standard
   1640 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1641 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1642 	 * the upper layer copy the headers.
   1643 	 */
   1644 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1645 
   1646 	if (sc->sc_type == WM_T_82574) {
   1647 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1648 		rxd->erx_data.erxd_addr =
   1649 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1650 		rxd->erx_data.erxd_dd = 0;
   1651 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1652 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1653 
   1654 		rxd->nqrx_data.nrxd_paddr =
   1655 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1656 		/* Currently, split header is not supported. */
   1657 		rxd->nqrx_data.nrxd_haddr = 0;
   1658 	} else {
   1659 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1660 
   1661 		wm_set_dma_addr(&rxd->wrx_addr,
   1662 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1663 		rxd->wrx_len = 0;
   1664 		rxd->wrx_cksum = 0;
   1665 		rxd->wrx_status = 0;
   1666 		rxd->wrx_errors = 0;
   1667 		rxd->wrx_special = 0;
   1668 	}
   1669 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1670 
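         	/*
         	 * Writing the ring index to the receive descriptor tail (RDT)
         	 * register below hands the descriptor back to the hardware.
         	 */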
   1671 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1672 }
   1673 
   1674 /*
   1675  * Device driver interface functions and commonly used functions.
   1676  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1677  */
   1678 
   1679 /* Lookup supported device table */
   1680 static const struct wm_product *
   1681 wm_lookup(const struct pci_attach_args *pa)
   1682 {
   1683 	const struct wm_product *wmp;
   1684 
   1685 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1686 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1687 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1688 			return wmp;
   1689 	}
   1690 	return NULL;
   1691 }
   1692 
   1693 /* The match function (ca_match) */
   1694 static int
   1695 wm_match(device_t parent, cfdata_t cf, void *aux)
   1696 {
   1697 	struct pci_attach_args *pa = aux;
   1698 
   1699 	if (wm_lookup(pa) != NULL)
   1700 		return 1;
   1701 
   1702 	return 0;
   1703 }
   1704 
   1705 /* The attach function (ca_attach) */
   1706 static void
   1707 wm_attach(device_t parent, device_t self, void *aux)
   1708 {
   1709 	struct wm_softc *sc = device_private(self);
   1710 	struct pci_attach_args *pa = aux;
   1711 	prop_dictionary_t dict;
   1712 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1713 	pci_chipset_tag_t pc = pa->pa_pc;
   1714 	int counts[PCI_INTR_TYPE_SIZE];
   1715 	pci_intr_type_t max_type;
   1716 	const char *eetype, *xname;
   1717 	bus_space_tag_t memt;
   1718 	bus_space_handle_t memh;
   1719 	bus_size_t memsize;
   1720 	int memh_valid;
   1721 	int i, error;
   1722 	const struct wm_product *wmp;
   1723 	prop_data_t ea;
   1724 	prop_number_t pn;
   1725 	uint8_t enaddr[ETHER_ADDR_LEN];
   1726 	char buf[256];
   1727 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1728 	pcireg_t preg, memtype;
   1729 	uint16_t eeprom_data, apme_mask;
   1730 	bool force_clear_smbi;
   1731 	uint32_t link_mode;
   1732 	uint32_t reg;
   1733 
   1734 	sc->sc_dev = self;
   1735 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1736 	sc->sc_core_stopping = false;
   1737 
   1738 	wmp = wm_lookup(pa);
   1739 #ifdef DIAGNOSTIC
   1740 	if (wmp == NULL) {
   1741 		printf("\n");
   1742 		panic("wm_attach: impossible");
   1743 	}
   1744 #endif
   1745 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1746 
   1747 	sc->sc_pc = pa->pa_pc;
   1748 	sc->sc_pcitag = pa->pa_tag;
   1749 
   1750 	if (pci_dma64_available(pa))
   1751 		sc->sc_dmat = pa->pa_dmat64;
   1752 	else
   1753 		sc->sc_dmat = pa->pa_dmat;
   1754 
   1755 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
    1756 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1757 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1758 
   1759 	sc->sc_type = wmp->wmp_type;
   1760 
   1761 	/* Set default function pointers */
   1762 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1763 	sc->phy.release = sc->nvm.release = wm_put_null;
   1764 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1765 
   1766 	if (sc->sc_type < WM_T_82543) {
   1767 		if (sc->sc_rev < 2) {
   1768 			aprint_error_dev(sc->sc_dev,
   1769 			    "i82542 must be at least rev. 2\n");
   1770 			return;
   1771 		}
   1772 		if (sc->sc_rev < 3)
   1773 			sc->sc_type = WM_T_82542_2_0;
   1774 	}
   1775 
   1776 	/*
   1777 	 * Disable MSI for Errata:
   1778 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1779 	 *
   1780 	 *  82544: Errata 25
   1781 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1782 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1783 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1784 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1785 	 *
   1786 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1787 	 *
   1788 	 *  82571 & 82572: Errata 63
   1789 	 */
   1790 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1791 	    || (sc->sc_type == WM_T_82572))
   1792 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1793 
   1794 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1795 	    || (sc->sc_type == WM_T_82580)
   1796 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1797 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1798 		sc->sc_flags |= WM_F_NEWQUEUE;
   1799 
   1800 	/* Set device properties (mactype) */
   1801 	dict = device_properties(sc->sc_dev);
   1802 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1803 
   1804 	/*
    1805 	 * Map the device.  All devices support memory-mapped access,
   1806 	 * and it is really required for normal operation.
   1807 	 */
   1808 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1809 	switch (memtype) {
   1810 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1811 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1812 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1813 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1814 		break;
   1815 	default:
   1816 		memh_valid = 0;
   1817 		break;
   1818 	}
   1819 
   1820 	if (memh_valid) {
   1821 		sc->sc_st = memt;
   1822 		sc->sc_sh = memh;
   1823 		sc->sc_ss = memsize;
   1824 	} else {
   1825 		aprint_error_dev(sc->sc_dev,
   1826 		    "unable to map device registers\n");
   1827 		return;
   1828 	}
   1829 
   1830 	/*
   1831 	 * In addition, i82544 and later support I/O mapped indirect
   1832 	 * register access.  It is not desirable (nor supported in
   1833 	 * this driver) to use it for normal operation, though it is
   1834 	 * required to work around bugs in some chip versions.
   1835 	 */
   1836 	if (sc->sc_type >= WM_T_82544) {
   1837 		/* First we have to find the I/O BAR. */
   1838 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1839 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1840 			if (memtype == PCI_MAPREG_TYPE_IO)
   1841 				break;
   1842 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1843 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1844 				i += 4;	/* skip high bits, too */
   1845 		}
   1846 		if (i < PCI_MAPREG_END) {
   1847 			/*
    1848 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1849 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1850 			 * That's not a problem, because the newer chips
    1851 			 * don't have this bug.
    1852 			 *
    1853 			 * The i8254x apparently doesn't respond when the
    1854 			 * I/O BAR is 0, which looks as if it hasn't been
    1855 			 * configured.
   1856 			 */
   1857 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1858 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1859 				aprint_error_dev(sc->sc_dev,
   1860 				    "WARNING: I/O BAR at zero.\n");
   1861 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1862 					0, &sc->sc_iot, &sc->sc_ioh,
   1863 					NULL, &sc->sc_ios) == 0) {
   1864 				sc->sc_flags |= WM_F_IOH_VALID;
   1865 			} else {
   1866 				aprint_error_dev(sc->sc_dev,
   1867 				    "WARNING: unable to map I/O space\n");
   1868 			}
   1869 		}
   1870 
   1871 	}
   1872 
   1873 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1874 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1875 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1876 	if (sc->sc_type < WM_T_82542_2_1)
   1877 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1878 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1879 
   1880 	/* power up chip */
   1881 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1882 	    NULL)) && error != EOPNOTSUPP) {
   1883 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1884 		return;
   1885 	}
   1886 
   1887 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1888 	/*
    1889 	 *  Don't use MSI-X if we can use only one queue; this saves
    1890 	 * interrupt resources.
   1891 	 */
   1892 	if (sc->sc_nqueues > 1) {
   1893 		max_type = PCI_INTR_TYPE_MSIX;
   1894 		/*
    1895 		 *  The 82583 has an MSI-X capability in the PCI configuration
    1896 		 * space, but it doesn't actually support it. At least the
    1897 		 * documentation says nothing about MSI-X.
   1898 		 */
   1899 		counts[PCI_INTR_TYPE_MSIX]
   1900 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1901 	} else {
   1902 		max_type = PCI_INTR_TYPE_MSI;
   1903 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1904 	}
   1905 
   1906 	/* Allocation settings */
   1907 	counts[PCI_INTR_TYPE_MSI] = 1;
   1908 	counts[PCI_INTR_TYPE_INTX] = 1;
   1909 	/* overridden by disable flags */
   1910 	if (wm_disable_msi != 0) {
   1911 		counts[PCI_INTR_TYPE_MSI] = 0;
   1912 		if (wm_disable_msix != 0) {
   1913 			max_type = PCI_INTR_TYPE_INTX;
   1914 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1915 		}
   1916 	} else if (wm_disable_msix != 0) {
   1917 		max_type = PCI_INTR_TYPE_MSI;
   1918 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1919 	}
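
         	/*
         	 * Net effect of the flags above: wm_disable_msix alone caps the
         	 * allocation at MSI, wm_disable_msi alone skips MSI (leaving
         	 * MSI-X or INTx), and both together force INTx.
         	 */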
   1920 
   1921 alloc_retry:
   1922 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1923 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1924 		return;
   1925 	}
   1926 
   1927 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1928 		error = wm_setup_msix(sc);
   1929 		if (error) {
   1930 			pci_intr_release(pc, sc->sc_intrs,
   1931 			    counts[PCI_INTR_TYPE_MSIX]);
   1932 
   1933 			/* Setup for MSI: Disable MSI-X */
   1934 			max_type = PCI_INTR_TYPE_MSI;
   1935 			counts[PCI_INTR_TYPE_MSI] = 1;
   1936 			counts[PCI_INTR_TYPE_INTX] = 1;
   1937 			goto alloc_retry;
   1938 		}
    1939 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1940 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1941 		error = wm_setup_legacy(sc);
   1942 		if (error) {
   1943 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1944 			    counts[PCI_INTR_TYPE_MSI]);
   1945 
   1946 			/* The next try is for INTx: Disable MSI */
   1947 			max_type = PCI_INTR_TYPE_INTX;
   1948 			counts[PCI_INTR_TYPE_INTX] = 1;
   1949 			goto alloc_retry;
   1950 		}
   1951 	} else {
   1952 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1953 		error = wm_setup_legacy(sc);
   1954 		if (error) {
   1955 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1956 			    counts[PCI_INTR_TYPE_INTX]);
   1957 			return;
   1958 		}
   1959 	}
   1960 
   1961 	/*
   1962 	 * Check the function ID (unit number of the chip).
   1963 	 */
   1964 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1965 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1966 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1967 	    || (sc->sc_type == WM_T_82580)
   1968 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1969 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1970 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1971 	else
   1972 		sc->sc_funcid = 0;
   1973 
   1974 	/*
   1975 	 * Determine a few things about the bus we're connected to.
   1976 	 */
   1977 	if (sc->sc_type < WM_T_82543) {
   1978 		/* We don't really know the bus characteristics here. */
   1979 		sc->sc_bus_speed = 33;
   1980 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1981 		/*
    1982 		 * CSA (Communication Streaming Architecture) is about as fast
    1983 		 * as a 32-bit 66MHz PCI bus.
   1984 		 */
   1985 		sc->sc_flags |= WM_F_CSA;
   1986 		sc->sc_bus_speed = 66;
   1987 		aprint_verbose_dev(sc->sc_dev,
   1988 		    "Communication Streaming Architecture\n");
   1989 		if (sc->sc_type == WM_T_82547) {
   1990 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1991 			callout_setfunc(&sc->sc_txfifo_ch,
   1992 					wm_82547_txfifo_stall, sc);
   1993 			aprint_verbose_dev(sc->sc_dev,
   1994 			    "using 82547 Tx FIFO stall work-around\n");
   1995 		}
   1996 	} else if (sc->sc_type >= WM_T_82571) {
   1997 		sc->sc_flags |= WM_F_PCIE;
   1998 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1999 		    && (sc->sc_type != WM_T_ICH10)
   2000 		    && (sc->sc_type != WM_T_PCH)
   2001 		    && (sc->sc_type != WM_T_PCH2)
   2002 		    && (sc->sc_type != WM_T_PCH_LPT)
   2003 		    && (sc->sc_type != WM_T_PCH_SPT)
   2004 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2005 			/* ICH* and PCH* have no PCIe capability registers */
   2006 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2007 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2008 				NULL) == 0)
   2009 				aprint_error_dev(sc->sc_dev,
   2010 				    "unable to find PCIe capability\n");
   2011 		}
   2012 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2013 	} else {
   2014 		reg = CSR_READ(sc, WMREG_STATUS);
   2015 		if (reg & STATUS_BUS64)
   2016 			sc->sc_flags |= WM_F_BUS64;
   2017 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2018 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2019 
   2020 			sc->sc_flags |= WM_F_PCIX;
   2021 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2022 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2023 				aprint_error_dev(sc->sc_dev,
   2024 				    "unable to find PCIX capability\n");
   2025 			else if (sc->sc_type != WM_T_82545_3 &&
   2026 				 sc->sc_type != WM_T_82546_3) {
   2027 				/*
   2028 				 * Work around a problem caused by the BIOS
   2029 				 * setting the max memory read byte count
   2030 				 * incorrectly.
   2031 				 */
   2032 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2033 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2034 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2035 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2036 
   2037 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2038 				    PCIX_CMD_BYTECNT_SHIFT;
   2039 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2040 				    PCIX_STATUS_MAXB_SHIFT;
   2041 				if (bytecnt > maxb) {
   2042 					aprint_verbose_dev(sc->sc_dev,
   2043 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2044 					    512 << bytecnt, 512 << maxb);
   2045 					pcix_cmd = (pcix_cmd &
   2046 					    ~PCIX_CMD_BYTECNT_MASK) |
   2047 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2048 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2049 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2050 					    pcix_cmd);
   2051 				}
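         				/*
         				 * Example with illustrative values: if the
         				 * BIOS left bytecnt = 2 (2048 bytes) while
         				 * maxb = 1 (1024 bytes), the code above
         				 * rewrote MMRBC down to 1024 bytes.
         				 */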
   2052 			}
   2053 		}
   2054 		/*
   2055 		 * The quad port adapter is special; it has a PCIX-PCIX
   2056 		 * bridge on the board, and can run the secondary bus at
   2057 		 * a higher speed.
   2058 		 */
   2059 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2060 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2061 								      : 66;
   2062 		} else if (sc->sc_flags & WM_F_PCIX) {
   2063 			switch (reg & STATUS_PCIXSPD_MASK) {
   2064 			case STATUS_PCIXSPD_50_66:
   2065 				sc->sc_bus_speed = 66;
   2066 				break;
   2067 			case STATUS_PCIXSPD_66_100:
   2068 				sc->sc_bus_speed = 100;
   2069 				break;
   2070 			case STATUS_PCIXSPD_100_133:
   2071 				sc->sc_bus_speed = 133;
   2072 				break;
   2073 			default:
   2074 				aprint_error_dev(sc->sc_dev,
   2075 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2076 				    reg & STATUS_PCIXSPD_MASK);
   2077 				sc->sc_bus_speed = 66;
   2078 				break;
   2079 			}
   2080 		} else
   2081 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2082 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2083 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2084 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2085 	}
   2086 
    2087 	/* Disable ASPM L0s and/or L1 as a workaround */
   2088 	wm_disable_aspm(sc);
   2089 
   2090 	/* clear interesting stat counters */
   2091 	CSR_READ(sc, WMREG_COLC);
   2092 	CSR_READ(sc, WMREG_RXERRC);
   2093 
   2094 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2095 	    || (sc->sc_type >= WM_T_ICH8))
   2096 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2097 	if (sc->sc_type >= WM_T_ICH8)
   2098 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2099 
    2100 	/* Set up NVM access functions and PHY/NVM locking */
   2101 	switch (sc->sc_type) {
   2102 	case WM_T_82542_2_0:
   2103 	case WM_T_82542_2_1:
   2104 	case WM_T_82543:
   2105 	case WM_T_82544:
   2106 		/* Microwire */
   2107 		sc->nvm.read = wm_nvm_read_uwire;
   2108 		sc->sc_nvm_wordsize = 64;
   2109 		sc->sc_nvm_addrbits = 6;
   2110 		break;
   2111 	case WM_T_82540:
   2112 	case WM_T_82545:
   2113 	case WM_T_82545_3:
   2114 	case WM_T_82546:
   2115 	case WM_T_82546_3:
   2116 		/* Microwire */
   2117 		sc->nvm.read = wm_nvm_read_uwire;
   2118 		reg = CSR_READ(sc, WMREG_EECD);
   2119 		if (reg & EECD_EE_SIZE) {
   2120 			sc->sc_nvm_wordsize = 256;
   2121 			sc->sc_nvm_addrbits = 8;
   2122 		} else {
   2123 			sc->sc_nvm_wordsize = 64;
   2124 			sc->sc_nvm_addrbits = 6;
   2125 		}
   2126 		sc->sc_flags |= WM_F_LOCK_EECD;
   2127 		sc->nvm.acquire = wm_get_eecd;
   2128 		sc->nvm.release = wm_put_eecd;
   2129 		break;
   2130 	case WM_T_82541:
   2131 	case WM_T_82541_2:
   2132 	case WM_T_82547:
   2133 	case WM_T_82547_2:
   2134 		reg = CSR_READ(sc, WMREG_EECD);
   2135 		/*
    2136 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI interface only
    2137 		 * on the 8254[17], so set the flags and functions before calling it.
   2138 		 */
   2139 		sc->sc_flags |= WM_F_LOCK_EECD;
   2140 		sc->nvm.acquire = wm_get_eecd;
   2141 		sc->nvm.release = wm_put_eecd;
   2142 		if (reg & EECD_EE_TYPE) {
   2143 			/* SPI */
   2144 			sc->nvm.read = wm_nvm_read_spi;
   2145 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2146 			wm_nvm_set_addrbits_size_eecd(sc);
   2147 		} else {
   2148 			/* Microwire */
   2149 			sc->nvm.read = wm_nvm_read_uwire;
   2150 			if ((reg & EECD_EE_ABITS) != 0) {
   2151 				sc->sc_nvm_wordsize = 256;
   2152 				sc->sc_nvm_addrbits = 8;
   2153 			} else {
   2154 				sc->sc_nvm_wordsize = 64;
   2155 				sc->sc_nvm_addrbits = 6;
   2156 			}
   2157 		}
   2158 		break;
   2159 	case WM_T_82571:
   2160 	case WM_T_82572:
   2161 		/* SPI */
   2162 		sc->nvm.read = wm_nvm_read_eerd;
    2163 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2164 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2165 		wm_nvm_set_addrbits_size_eecd(sc);
   2166 		sc->phy.acquire = wm_get_swsm_semaphore;
   2167 		sc->phy.release = wm_put_swsm_semaphore;
   2168 		sc->nvm.acquire = wm_get_nvm_82571;
   2169 		sc->nvm.release = wm_put_nvm_82571;
   2170 		break;
   2171 	case WM_T_82573:
   2172 	case WM_T_82574:
   2173 	case WM_T_82583:
   2174 		sc->nvm.read = wm_nvm_read_eerd;
    2175 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2176 		if (sc->sc_type == WM_T_82573) {
   2177 			sc->phy.acquire = wm_get_swsm_semaphore;
   2178 			sc->phy.release = wm_put_swsm_semaphore;
   2179 			sc->nvm.acquire = wm_get_nvm_82571;
   2180 			sc->nvm.release = wm_put_nvm_82571;
   2181 		} else {
   2182 			/* Both PHY and NVM use the same semaphore. */
   2183 			sc->phy.acquire = sc->nvm.acquire
   2184 			    = wm_get_swfwhw_semaphore;
   2185 			sc->phy.release = sc->nvm.release
   2186 			    = wm_put_swfwhw_semaphore;
   2187 		}
   2188 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2189 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2190 			sc->sc_nvm_wordsize = 2048;
   2191 		} else {
   2192 			/* SPI */
   2193 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2194 			wm_nvm_set_addrbits_size_eecd(sc);
   2195 		}
   2196 		break;
   2197 	case WM_T_82575:
   2198 	case WM_T_82576:
   2199 	case WM_T_82580:
   2200 	case WM_T_I350:
   2201 	case WM_T_I354:
   2202 	case WM_T_80003:
   2203 		/* SPI */
   2204 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2205 		wm_nvm_set_addrbits_size_eecd(sc);
    2206 		if ((sc->sc_type == WM_T_80003)
   2207 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2208 			sc->nvm.read = wm_nvm_read_eerd;
   2209 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2210 		} else {
   2211 			sc->nvm.read = wm_nvm_read_spi;
   2212 			sc->sc_flags |= WM_F_LOCK_EECD;
   2213 		}
   2214 		sc->phy.acquire = wm_get_phy_82575;
   2215 		sc->phy.release = wm_put_phy_82575;
   2216 		sc->nvm.acquire = wm_get_nvm_80003;
   2217 		sc->nvm.release = wm_put_nvm_80003;
   2218 		break;
   2219 	case WM_T_ICH8:
   2220 	case WM_T_ICH9:
   2221 	case WM_T_ICH10:
   2222 	case WM_T_PCH:
   2223 	case WM_T_PCH2:
   2224 	case WM_T_PCH_LPT:
   2225 		sc->nvm.read = wm_nvm_read_ich8;
   2226 		/* FLASH */
   2227 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2228 		sc->sc_nvm_wordsize = 2048;
    2229 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   2230 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2231 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2232 			aprint_error_dev(sc->sc_dev,
   2233 			    "can't map FLASH registers\n");
   2234 			goto out;
   2235 		}
   2236 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2237 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2238 		    ICH_FLASH_SECTOR_SIZE;
   2239 		sc->sc_ich8_flash_bank_size =
   2240 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2241 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2242 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2243 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
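         		/*
         		 * At this point the bank size has gone from flash sectors
         		 * to bytes and then to 16-bit words per bank, assuming the
         		 * NVM region is split into two equal banks.
         		 */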
   2244 		sc->sc_flashreg_offset = 0;
   2245 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2246 		sc->phy.release = wm_put_swflag_ich8lan;
   2247 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2248 		sc->nvm.release = wm_put_nvm_ich8lan;
   2249 		break;
   2250 	case WM_T_PCH_SPT:
   2251 	case WM_T_PCH_CNP:
   2252 		sc->nvm.read = wm_nvm_read_spt;
   2253 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2254 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2255 		sc->sc_flasht = sc->sc_st;
   2256 		sc->sc_flashh = sc->sc_sh;
   2257 		sc->sc_ich8_flash_base = 0;
   2258 		sc->sc_nvm_wordsize =
   2259 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2260 			* NVM_SIZE_MULTIPLIER;
    2261 		/* It is the size in bytes; we want words */
   2262 		sc->sc_nvm_wordsize /= 2;
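         		/*
         		 * Reading of the STRAP computation above: bits 5:1 of
         		 * WMREG_STRAP encode the flash size field, so a field value
         		 * of n gives (n + 1) * NVM_SIZE_MULTIPLIER bytes, halved
         		 * above to get 16-bit words.
         		 */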
   2263 		/* assume 2 banks */
   2264 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2265 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2266 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2267 		sc->phy.release = wm_put_swflag_ich8lan;
   2268 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2269 		sc->nvm.release = wm_put_nvm_ich8lan;
   2270 		break;
   2271 	case WM_T_I210:
   2272 	case WM_T_I211:
    2273 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2274 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2275 		if (wm_nvm_flash_presence_i210(sc)) {
   2276 			sc->nvm.read = wm_nvm_read_eerd;
   2277 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2278 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2279 			wm_nvm_set_addrbits_size_eecd(sc);
   2280 		} else {
   2281 			sc->nvm.read = wm_nvm_read_invm;
   2282 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2283 			sc->sc_nvm_wordsize = INVM_SIZE;
   2284 		}
   2285 		sc->phy.acquire = wm_get_phy_82575;
   2286 		sc->phy.release = wm_put_phy_82575;
   2287 		sc->nvm.acquire = wm_get_nvm_80003;
   2288 		sc->nvm.release = wm_put_nvm_80003;
   2289 		break;
   2290 	default:
   2291 		break;
   2292 	}
   2293 
   2294 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2295 	switch (sc->sc_type) {
   2296 	case WM_T_82571:
   2297 	case WM_T_82572:
   2298 		reg = CSR_READ(sc, WMREG_SWSM2);
   2299 		if ((reg & SWSM2_LOCK) == 0) {
   2300 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2301 			force_clear_smbi = true;
   2302 		} else
   2303 			force_clear_smbi = false;
   2304 		break;
   2305 	case WM_T_82573:
   2306 	case WM_T_82574:
   2307 	case WM_T_82583:
   2308 		force_clear_smbi = true;
   2309 		break;
   2310 	default:
   2311 		force_clear_smbi = false;
   2312 		break;
   2313 	}
   2314 	if (force_clear_smbi) {
   2315 		reg = CSR_READ(sc, WMREG_SWSM);
   2316 		if ((reg & SWSM_SMBI) != 0)
   2317 			aprint_error_dev(sc->sc_dev,
   2318 			    "Please update the Bootagent\n");
   2319 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2320 	}
   2321 
   2322 	/*
    2323 	 * Defer printing the EEPROM type until after verifying the checksum.
   2324 	 * This allows the EEPROM type to be printed correctly in the case
   2325 	 * that no EEPROM is attached.
   2326 	 */
   2327 	/*
   2328 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2329 	 * this for later, so we can fail future reads from the EEPROM.
   2330 	 */
   2331 	if (wm_nvm_validate_checksum(sc)) {
   2332 		/*
    2333 		 * Validate again, because some PCI-e parts fail the
    2334 		 * first check due to the link being in a sleep state.
   2335 		 */
   2336 		if (wm_nvm_validate_checksum(sc))
   2337 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2338 	}
   2339 
   2340 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2341 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2342 	else {
   2343 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2344 		    sc->sc_nvm_wordsize);
   2345 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2346 			aprint_verbose("iNVM");
   2347 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2348 			aprint_verbose("FLASH(HW)");
   2349 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2350 			aprint_verbose("FLASH");
   2351 		else {
   2352 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2353 				eetype = "SPI";
   2354 			else
   2355 				eetype = "MicroWire";
   2356 			aprint_verbose("(%d address bits) %s EEPROM",
   2357 			    sc->sc_nvm_addrbits, eetype);
   2358 		}
   2359 	}
   2360 	wm_nvm_version(sc);
   2361 	aprint_verbose("\n");
   2362 
   2363 	/*
    2364 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2365 	 * might be incorrect.
   2366 	 */
   2367 	wm_gmii_setup_phytype(sc, 0, 0);
   2368 
   2369 	/* Reset the chip to a known state. */
   2370 	wm_reset(sc);
   2371 
   2372 	/*
   2373 	 * Check for I21[01] PLL workaround.
   2374 	 *
   2375 	 * Three cases:
   2376 	 * a) Chip is I211.
   2377 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2378 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2379 	 */
   2380 	if (sc->sc_type == WM_T_I211)
   2381 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2382 	if (sc->sc_type == WM_T_I210) {
   2383 		if (!wm_nvm_flash_presence_i210(sc))
   2384 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2385 		else if ((sc->sc_nvm_ver_major < 3)
   2386 		    || ((sc->sc_nvm_ver_major == 3)
   2387 			&& (sc->sc_nvm_ver_minor < 25))) {
   2388 			aprint_verbose_dev(sc->sc_dev,
   2389 			    "ROM image version %d.%d is older than 3.25\n",
   2390 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2391 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2392 		}
   2393 	}
   2394 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2395 		wm_pll_workaround_i210(sc);
   2396 
   2397 	wm_get_wakeup(sc);
   2398 
   2399 	/* Non-AMT based hardware can now take control from firmware */
   2400 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2401 		wm_get_hw_control(sc);
   2402 
   2403 	/*
    2404 	 * Read the Ethernet address from the EEPROM, unless it was found
    2405 	 * first in the device properties.
   2406 	 */
   2407 	ea = prop_dictionary_get(dict, "mac-address");
   2408 	if (ea != NULL) {
   2409 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2410 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2411 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2412 	} else {
   2413 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2414 			aprint_error_dev(sc->sc_dev,
   2415 			    "unable to read Ethernet address\n");
   2416 			goto out;
   2417 		}
   2418 	}
   2419 
   2420 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2421 	    ether_sprintf(enaddr));
   2422 
   2423 	/*
   2424 	 * Read the config info from the EEPROM, and set up various
   2425 	 * bits in the control registers based on their contents.
   2426 	 */
   2427 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2428 	if (pn != NULL) {
   2429 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2430 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2431 	} else {
   2432 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2433 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2434 			goto out;
   2435 		}
   2436 	}
   2437 
   2438 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2439 	if (pn != NULL) {
   2440 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2441 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2442 	} else {
   2443 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2444 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2445 			goto out;
   2446 		}
   2447 	}
   2448 
   2449 	/* check for WM_F_WOL */
   2450 	switch (sc->sc_type) {
   2451 	case WM_T_82542_2_0:
   2452 	case WM_T_82542_2_1:
   2453 	case WM_T_82543:
   2454 		/* dummy? */
   2455 		eeprom_data = 0;
   2456 		apme_mask = NVM_CFG3_APME;
   2457 		break;
   2458 	case WM_T_82544:
   2459 		apme_mask = NVM_CFG2_82544_APM_EN;
   2460 		eeprom_data = cfg2;
   2461 		break;
   2462 	case WM_T_82546:
   2463 	case WM_T_82546_3:
   2464 	case WM_T_82571:
   2465 	case WM_T_82572:
   2466 	case WM_T_82573:
   2467 	case WM_T_82574:
   2468 	case WM_T_82583:
   2469 	case WM_T_80003:
   2470 	default:
   2471 		apme_mask = NVM_CFG3_APME;
   2472 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2473 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2474 		break;
   2475 	case WM_T_82575:
   2476 	case WM_T_82576:
   2477 	case WM_T_82580:
   2478 	case WM_T_I350:
   2479 	case WM_T_I354: /* XXX ok? */
   2480 	case WM_T_ICH8:
   2481 	case WM_T_ICH9:
   2482 	case WM_T_ICH10:
   2483 	case WM_T_PCH:
   2484 	case WM_T_PCH2:
   2485 	case WM_T_PCH_LPT:
   2486 	case WM_T_PCH_SPT:
   2487 	case WM_T_PCH_CNP:
   2488 		/* XXX The funcid should be checked on some devices */
   2489 		apme_mask = WUC_APME;
   2490 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2491 		break;
   2492 	}
   2493 
    2494 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2495 	if ((eeprom_data & apme_mask) != 0)
   2496 		sc->sc_flags |= WM_F_WOL;
   2497 
   2498 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2499 		/* Check NVM for autonegotiation */
   2500 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2501 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2502 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2503 		}
   2504 	}
   2505 
   2506 	/*
    2507 	 * XXX need special handling for some multiple-port cards
    2508 	 * to disable a particular port.
   2509 	 */
   2510 
   2511 	if (sc->sc_type >= WM_T_82544) {
   2512 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2513 		if (pn != NULL) {
   2514 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2515 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2516 		} else {
   2517 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2518 				aprint_error_dev(sc->sc_dev,
   2519 				    "unable to read SWDPIN\n");
   2520 				goto out;
   2521 			}
   2522 		}
   2523 	}
   2524 
   2525 	if (cfg1 & NVM_CFG1_ILOS)
   2526 		sc->sc_ctrl |= CTRL_ILOS;
   2527 
   2528 	/*
   2529 	 * XXX
    2530 	 * This code isn't correct, because pins 2 and 3 are located
    2531 	 * at different positions on newer chips. Check all the datasheets.
    2532 	 *
    2533 	 * Until this problem is resolved, do this only on chips <= 82580.
   2534 	 */
   2535 	if (sc->sc_type <= WM_T_82580) {
   2536 		if (sc->sc_type >= WM_T_82544) {
   2537 			sc->sc_ctrl |=
   2538 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2539 			    CTRL_SWDPIO_SHIFT;
   2540 			sc->sc_ctrl |=
   2541 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2542 			    CTRL_SWDPINS_SHIFT;
   2543 		} else {
   2544 			sc->sc_ctrl |=
   2545 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2546 			    CTRL_SWDPIO_SHIFT;
   2547 		}
   2548 	}
   2549 
    2550 	/* XXX Is this needed for chips other than the 82580? */
   2551 	if (sc->sc_type == WM_T_82580) {
   2552 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2553 		if (nvmword & __BIT(13))
   2554 			sc->sc_ctrl |= CTRL_ILOS;
   2555 	}
   2556 
   2557 #if 0
   2558 	if (sc->sc_type >= WM_T_82544) {
   2559 		if (cfg1 & NVM_CFG1_IPS0)
   2560 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2561 		if (cfg1 & NVM_CFG1_IPS1)
   2562 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2563 		sc->sc_ctrl_ext |=
   2564 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2565 		    CTRL_EXT_SWDPIO_SHIFT;
   2566 		sc->sc_ctrl_ext |=
   2567 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2568 		    CTRL_EXT_SWDPINS_SHIFT;
   2569 	} else {
   2570 		sc->sc_ctrl_ext |=
   2571 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2572 		    CTRL_EXT_SWDPIO_SHIFT;
   2573 	}
   2574 #endif
   2575 
   2576 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2577 #if 0
   2578 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2579 #endif
   2580 
   2581 	if (sc->sc_type == WM_T_PCH) {
   2582 		uint16_t val;
   2583 
   2584 		/* Save the NVM K1 bit setting */
   2585 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2586 
   2587 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2588 			sc->sc_nvm_k1_enabled = 1;
   2589 		else
   2590 			sc->sc_nvm_k1_enabled = 0;
   2591 	}
   2592 
   2593 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2594 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2595 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2596 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2597 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2598 	    || sc->sc_type == WM_T_82573
   2599 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2600 		/* Copper only */
   2601 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2602 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2603 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2604 	    || (sc->sc_type == WM_T_I211)) {
   2605 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2606 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2607 		switch (link_mode) {
   2608 		case CTRL_EXT_LINK_MODE_1000KX:
   2609 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2610 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2611 			break;
   2612 		case CTRL_EXT_LINK_MODE_SGMII:
   2613 			if (wm_sgmii_uses_mdio(sc)) {
   2614 				aprint_verbose_dev(sc->sc_dev,
   2615 				    "SGMII(MDIO)\n");
   2616 				sc->sc_flags |= WM_F_SGMII;
   2617 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2618 				break;
   2619 			}
   2620 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2621 			/*FALLTHROUGH*/
   2622 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2623 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2624 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2625 				if (link_mode
   2626 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2627 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2628 					sc->sc_flags |= WM_F_SGMII;
   2629 				} else {
   2630 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2631 					aprint_verbose_dev(sc->sc_dev,
   2632 					    "SERDES\n");
   2633 				}
   2634 				break;
   2635 			}
   2636 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2637 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2638 
   2639 			/* Change current link mode setting */
   2640 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2641 			switch (sc->sc_mediatype) {
   2642 			case WM_MEDIATYPE_COPPER:
   2643 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2644 				break;
   2645 			case WM_MEDIATYPE_SERDES:
   2646 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2647 				break;
   2648 			default:
   2649 				break;
   2650 			}
   2651 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2652 			break;
   2653 		case CTRL_EXT_LINK_MODE_GMII:
   2654 		default:
   2655 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2656 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2657 			break;
   2658 		}
   2659 
    2661 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2662 			reg |= CTRL_EXT_I2C_ENA;
    2663 		else
    2664 			reg &= ~CTRL_EXT_I2C_ENA;
   2665 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2666 	} else if (sc->sc_type < WM_T_82543 ||
   2667 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2668 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2669 			aprint_error_dev(sc->sc_dev,
   2670 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2671 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2672 		}
   2673 	} else {
   2674 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2675 			aprint_error_dev(sc->sc_dev,
   2676 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2677 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2678 		}
   2679 	}
   2680 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2681 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2682 
   2683 	/* Set device properties (macflags) */
   2684 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2685 
   2686 	/* Initialize the media structures accordingly. */
   2687 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2688 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2689 	else
   2690 		wm_tbi_mediainit(sc); /* All others */
   2691 
   2692 	ifp = &sc->sc_ethercom.ec_if;
   2693 	xname = device_xname(sc->sc_dev);
   2694 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2695 	ifp->if_softc = sc;
   2696 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2697 #ifdef WM_MPSAFE
   2698 	ifp->if_extflags = IFEF_MPSAFE;
   2699 #endif
   2700 	ifp->if_ioctl = wm_ioctl;
   2701 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2702 		ifp->if_start = wm_nq_start;
   2703 		/*
   2704 		 * When the number of CPUs is one and the controller can use
    2705 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2706 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2707 		 * the other for link status changes.
   2708 		 * In this situation, wm_nq_transmit() is disadvantageous
   2709 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2710 		 */
   2711 		if (wm_is_using_multiqueue(sc))
   2712 			ifp->if_transmit = wm_nq_transmit;
   2713 	} else {
   2714 		ifp->if_start = wm_start;
   2715 		/*
    2716 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2717 		 */
   2718 		if (wm_is_using_multiqueue(sc))
   2719 			ifp->if_transmit = wm_transmit;
   2720 	}
    2721 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   2722 	ifp->if_init = wm_init;
   2723 	ifp->if_stop = wm_stop;
   2724 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2725 	IFQ_SET_READY(&ifp->if_snd);
   2726 
   2727 	/* Check for jumbo frame */
   2728 	switch (sc->sc_type) {
   2729 	case WM_T_82573:
   2730 		/* XXX limited to 9234 if ASPM is disabled */
   2731 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2732 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2733 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2734 		break;
   2735 	case WM_T_82571:
   2736 	case WM_T_82572:
   2737 	case WM_T_82574:
   2738 	case WM_T_82583:
   2739 	case WM_T_82575:
   2740 	case WM_T_82576:
   2741 	case WM_T_82580:
   2742 	case WM_T_I350:
   2743 	case WM_T_I354:
   2744 	case WM_T_I210:
   2745 	case WM_T_I211:
   2746 	case WM_T_80003:
   2747 	case WM_T_ICH9:
   2748 	case WM_T_ICH10:
   2749 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2750 	case WM_T_PCH_LPT:
   2751 	case WM_T_PCH_SPT:
   2752 	case WM_T_PCH_CNP:
   2753 		/* XXX limited to 9234 */
   2754 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2755 		break;
   2756 	case WM_T_PCH:
   2757 		/* XXX limited to 4096 */
   2758 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2759 		break;
   2760 	case WM_T_82542_2_0:
   2761 	case WM_T_82542_2_1:
   2762 	case WM_T_ICH8:
   2763 		/* No support for jumbo frames */
   2764 		break;
   2765 	default:
   2766 		/* ETHER_MAX_LEN_JUMBO */
   2767 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2768 		break;
   2769 	}
   2770 
   2771 	/* If we're an i82543 or greater, we can support VLANs. */
   2772 	if (sc->sc_type >= WM_T_82543)
   2773 		sc->sc_ethercom.ec_capabilities |=
   2774 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2775 
   2776 	/*
   2777 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2778 	 * on i82543 and later.
   2779 	 */
   2780 	if (sc->sc_type >= WM_T_82543) {
   2781 		ifp->if_capabilities |=
   2782 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2783 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2784 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2785 		    IFCAP_CSUM_TCPv6_Tx |
   2786 		    IFCAP_CSUM_UDPv6_Tx;
   2787 	}
   2788 
   2789 	/*
   2790 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2791 	 *
   2792 	 *	82541GI (8086:1076) ... no
   2793 	 *	82572EI (8086:10b9) ... yes
   2794 	 */
   2795 	if (sc->sc_type >= WM_T_82571) {
   2796 		ifp->if_capabilities |=
   2797 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2798 	}
   2799 
   2800 	/*
   2801 	 * If we're an i82544 or greater (except i82547), we can do
   2802 	 * TCP segmentation offload.
   2803 	 */
   2804 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2805 		ifp->if_capabilities |= IFCAP_TSOv4;
   2806 	}
   2807 
   2808 	if (sc->sc_type >= WM_T_82571) {
   2809 		ifp->if_capabilities |= IFCAP_TSOv6;
   2810 	}
   2811 
   2812 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2813 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2814 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2815 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2816 
   2817 #ifdef WM_MPSAFE
   2818 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2819 #else
   2820 	sc->sc_core_lock = NULL;
   2821 #endif
   2822 
   2823 	/* Attach the interface. */
   2824 	error = if_initialize(ifp);
   2825 	if (error != 0) {
   2826 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2827 		    error);
   2828 		return; /* Error */
   2829 	}
   2830 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2831 	ether_ifattach(ifp, enaddr);
   2832 	if_register(ifp);
   2833 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2834 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2835 			  RND_FLAG_DEFAULT);
   2836 
   2837 #ifdef WM_EVENT_COUNTERS
   2838 	/* Attach event counters. */
   2839 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2840 	    NULL, xname, "linkintr");
   2841 
   2842 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2843 	    NULL, xname, "tx_xoff");
   2844 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2845 	    NULL, xname, "tx_xon");
   2846 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2847 	    NULL, xname, "rx_xoff");
   2848 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2849 	    NULL, xname, "rx_xon");
   2850 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2851 	    NULL, xname, "rx_macctl");
   2852 #endif /* WM_EVENT_COUNTERS */
   2853 
   2854 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2855 		pmf_class_network_register(self, ifp);
   2856 	else
   2857 		aprint_error_dev(self, "couldn't establish power handler\n");
   2858 
   2859 	sc->sc_flags |= WM_F_ATTACHED;
   2860  out:
   2861 	return;
   2862 }
   2863 
   2864 /* The detach function (ca_detach) */
   2865 static int
   2866 wm_detach(device_t self, int flags __unused)
   2867 {
   2868 	struct wm_softc *sc = device_private(self);
   2869 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2870 	int i;
   2871 
   2872 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2873 		return 0;
   2874 
   2875 	/* Stop the interface.  Its callouts are stopped in wm_stop(). */
   2876 	wm_stop(ifp, 1);
   2877 
   2878 	pmf_device_deregister(self);
   2879 
   2880 #ifdef WM_EVENT_COUNTERS
   2881 	evcnt_detach(&sc->sc_ev_linkintr);
   2882 
   2883 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2884 	evcnt_detach(&sc->sc_ev_tx_xon);
   2885 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2886 	evcnt_detach(&sc->sc_ev_rx_xon);
   2887 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2888 #endif /* WM_EVENT_COUNTERS */
   2889 
   2890 	/* Tell the firmware about the release */
   2891 	WM_CORE_LOCK(sc);
   2892 	wm_release_manageability(sc);
   2893 	wm_release_hw_control(sc);
   2894 	wm_enable_wakeup(sc);
   2895 	WM_CORE_UNLOCK(sc);
   2896 
   2897 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2898 
   2899 	/* Delete all remaining media. */
   2900 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2901 
   2902 	ether_ifdetach(ifp);
   2903 	if_detach(ifp);
   2904 	if_percpuq_destroy(sc->sc_ipq);
   2905 
   2906 	/* Unload RX dmamaps and free mbufs */
   2907 	for (i = 0; i < sc->sc_nqueues; i++) {
   2908 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2909 		mutex_enter(rxq->rxq_lock);
   2910 		wm_rxdrain(rxq);
   2911 		mutex_exit(rxq->rxq_lock);
   2912 	}
   2913 	/* Must unlock here */
   2914 
   2915 	/* Disestablish the interrupt handler */
   2916 	for (i = 0; i < sc->sc_nintrs; i++) {
   2917 		if (sc->sc_ihs[i] != NULL) {
   2918 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2919 			sc->sc_ihs[i] = NULL;
   2920 		}
   2921 	}
   2922 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2923 
   2924 	wm_free_txrx_queues(sc);
   2925 
   2926 	/* Unmap the registers */
   2927 	if (sc->sc_ss) {
   2928 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2929 		sc->sc_ss = 0;
   2930 	}
   2931 	if (sc->sc_ios) {
   2932 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2933 		sc->sc_ios = 0;
   2934 	}
   2935 	if (sc->sc_flashs) {
   2936 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2937 		sc->sc_flashs = 0;
   2938 	}
   2939 
   2940 	if (sc->sc_core_lock)
   2941 		mutex_obj_free(sc->sc_core_lock);
   2942 	if (sc->sc_ich_phymtx)
   2943 		mutex_obj_free(sc->sc_ich_phymtx);
   2944 	if (sc->sc_ich_nvmmtx)
   2945 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2946 
   2947 	return 0;
   2948 }
   2949 
   2950 static bool
   2951 wm_suspend(device_t self, const pmf_qual_t *qual)
   2952 {
   2953 	struct wm_softc *sc = device_private(self);
   2954 
   2955 	wm_release_manageability(sc);
   2956 	wm_release_hw_control(sc);
   2957 	wm_enable_wakeup(sc);
   2958 
   2959 	return true;
   2960 }
   2961 
   2962 static bool
   2963 wm_resume(device_t self, const pmf_qual_t *qual)
   2964 {
   2965 	struct wm_softc *sc = device_private(self);
   2966 
   2967 	/* Disable ASPM L0s and/or L1 for workaround */
   2968 	wm_disable_aspm(sc);
   2969 	wm_init_manageability(sc);
   2970 
   2971 	return true;
   2972 }
   2973 
   2974 /*
   2975  * wm_watchdog:
   2976  *
   2977  *	Watchdog timer handler, called from wm_tick().
   2978  */
   2979 static void
   2980 wm_watchdog(struct ifnet *ifp)
   2981 {
   2982 	int qid;
   2983 	struct wm_softc *sc = ifp->if_softc;
   2984 	uint16_t hang_queue = 0; /* wm(4)'s max queue count is 16 (82576). */
   2985 
   2986 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2987 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2988 
   2989 		wm_watchdog_txq(ifp, txq, &hang_queue);
   2990 	}
   2991 
   2992 	/*
   2993 	 * If any of the queues hung up, reset the interface.
   2994 	 */
   2995 	if (hang_queue != 0) {
   2996 		(void) wm_init(ifp);
   2997 
   2998 		/*
   2999 		 * Some upper layer processing, e.g. ALTQ or a single-CPU
   3000 		 * system, still calls ifp->if_start() directly.
   3001 		 */
   3002 		/* Try to get more packets going. */
   3003 		ifp->if_start(ifp);
   3004 	}
   3005 }
   3006 
   3007 
   3008 static void
   3009 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3010 {
   3011 
   3012 	mutex_enter(txq->txq_lock);
   3013 	if (txq->txq_watchdog &&
   3014 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3015 		wm_watchdog_txq_locked(ifp, txq, hang);
   3016 	}
   3017 	mutex_exit(txq->txq_lock);
   3018 }
   3019 
   3020 static void
   3021 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3022     uint16_t *hang)
   3023 {
   3024 	struct wm_softc *sc = ifp->if_softc;
   3025 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3026 
   3027 	KASSERT(mutex_owned(txq->txq_lock));
   3028 
   3029 	/*
   3030 	 * Since we're using delayed interrupts, sweep up
   3031 	 * before we report an error.
   3032 	 */
   3033 	wm_txeof(txq, UINT_MAX);
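	/*
	 * If the queue is still armed (txq_watchdog) after the sweep,
	 * record it in the caller's per-queue hang bitmap.
	 */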
   3034 	if (txq->txq_watchdog)
   3035 		*hang |= __BIT(wmq->wmq_id);
   3036 
   3037 	if (txq->txq_free != WM_NTXDESC(txq)) {
   3038 #ifdef WM_DEBUG
   3039 		int i, j;
   3040 		struct wm_txsoft *txs;
   3041 #endif
   3042 		log(LOG_ERR,
   3043 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3044 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3045 		    txq->txq_next);
   3046 		ifp->if_oerrors++;
   3047 #ifdef WM_DEBUG
   3048 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   3049 		    i = WM_NEXTTXS(txq, i)) {
   3050 		    txs = &txq->txq_soft[i];
   3051 		    printf("txs %d tx %d -> %d\n",
   3052 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3053 		    for (j = txs->txs_firstdesc; ;
   3054 			j = WM_NEXTTX(txq, j)) {
   3055 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3056 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3057 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3058 				    printf("\t %#08x%08x\n",
   3059 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3060 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3061 			    } else {
   3062 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3063 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3064 					txq->txq_descs[j].wtx_addr.wa_low);
   3065 				    printf("\t %#04x%02x%02x%08x\n",
   3066 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3067 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3068 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3069 					txq->txq_descs[j].wtx_cmdlen);
   3070 			    }
   3071 			if (j == txs->txs_lastdesc)
   3072 				break;
   3073 			}
   3074 		}
   3075 #endif
   3076 	}
   3077 }
   3078 
   3079 /*
   3080  * wm_tick:
   3081  *
   3082  *	One second timer, used to check link status, sweep up
   3083  *	completed transmit jobs, etc.
   3084  */
   3085 static void
   3086 wm_tick(void *arg)
   3087 {
   3088 	struct wm_softc *sc = arg;
   3089 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3090 #ifndef WM_MPSAFE
   3091 	int s = splnet();
   3092 #endif
   3093 
   3094 	WM_CORE_LOCK(sc);
   3095 
   3096 	if (sc->sc_core_stopping) {
   3097 		WM_CORE_UNLOCK(sc);
   3098 #ifndef WM_MPSAFE
   3099 		splx(s);
   3100 #endif
   3101 		return;
   3102 	}
   3103 
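	/*
	 * Note: these statistics registers are clear-on-read, so each
	 * read below yields the count accumulated since the last tick.
	 */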
   3104 	if (sc->sc_type >= WM_T_82542_2_1) {
   3105 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3106 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3107 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3108 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3109 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3110 	}
   3111 
   3112 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3113 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3114 	    + CSR_READ(sc, WMREG_CRCERRS)
   3115 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3116 	    + CSR_READ(sc, WMREG_SYMERRC)
   3117 	    + CSR_READ(sc, WMREG_RXERRC)
   3118 	    + CSR_READ(sc, WMREG_SEC)
   3119 	    + CSR_READ(sc, WMREG_CEXTERR)
   3120 	    + CSR_READ(sc, WMREG_RLEC);
   3121 	/*
   3122 	 * WMREG_RNBC is incremented when no receive buffers are available
   3123 	 * in host memory.  It is not the number of dropped packets,
   3124 	 * because the ethernet controller can still receive packets in
   3125 	 * that case as long as there is space in the PHY's FIFO.
   3126 	 *
   3127 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT
   3128 	 * instead of if_iqdrops.
   3129 	 */
   3130 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3131 
   3132 	if (sc->sc_flags & WM_F_HAS_MII)
   3133 		mii_tick(&sc->sc_mii);
   3134 	else if ((sc->sc_type >= WM_T_82575)
   3135 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3136 		wm_serdes_tick(sc);
   3137 	else
   3138 		wm_tbi_tick(sc);
   3139 
   3140 	WM_CORE_UNLOCK(sc);
   3141 
   3142 	wm_watchdog(ifp);
   3143 
   3144 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3145 }
   3146 
   3147 static int
   3148 wm_ifflags_cb(struct ethercom *ec)
   3149 {
   3150 	struct ifnet *ifp = &ec->ec_if;
   3151 	struct wm_softc *sc = ifp->if_softc;
   3152 	int rc = 0;
   3153 
   3154 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3155 		device_xname(sc->sc_dev), __func__));
   3156 
   3157 	WM_CORE_LOCK(sc);
   3158 
   3159 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3160 	sc->sc_if_flags = ifp->if_flags;
   3161 
   3162 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3163 		rc = ENETRESET;
   3164 		goto out;
   3165 	}
   3166 
   3167 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3168 		wm_set_filter(sc);
   3169 
   3170 	wm_set_vlan(sc);
   3171 
   3172 out:
   3173 	WM_CORE_UNLOCK(sc);
   3174 
   3175 	return rc;
   3176 }
   3177 
   3178 /*
   3179  * wm_ioctl:		[ifnet interface function]
   3180  *
   3181  *	Handle control requests from the operator.
   3182  */
   3183 static int
   3184 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3185 {
   3186 	struct wm_softc *sc = ifp->if_softc;
   3187 	struct ifreq *ifr = (struct ifreq *) data;
   3188 	struct ifaddr *ifa = (struct ifaddr *)data;
   3189 	struct sockaddr_dl *sdl;
   3190 	int s, error;
   3191 
   3192 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3193 		device_xname(sc->sc_dev), __func__));
   3194 
   3195 #ifndef WM_MPSAFE
   3196 	s = splnet();
   3197 #endif
   3198 	switch (cmd) {
   3199 	case SIOCSIFMEDIA:
   3200 	case SIOCGIFMEDIA:
   3201 		WM_CORE_LOCK(sc);
   3202 		/* Flow control requires full-duplex mode. */
   3203 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3204 		    (ifr->ifr_media & IFM_FDX) == 0)
   3205 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3206 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3207 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3208 				/* We can do both TXPAUSE and RXPAUSE. */
   3209 				ifr->ifr_media |=
   3210 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3211 			}
   3212 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3213 		}
   3214 		WM_CORE_UNLOCK(sc);
   3215 #ifdef WM_MPSAFE
   3216 		s = splnet();
   3217 #endif
   3218 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3219 #ifdef WM_MPSAFE
   3220 		splx(s);
   3221 #endif
   3222 		break;
   3223 	case SIOCINITIFADDR:
   3224 		WM_CORE_LOCK(sc);
   3225 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3226 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3227 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3228 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3229 			/* unicast address is first multicast entry */
   3230 			wm_set_filter(sc);
   3231 			error = 0;
   3232 			WM_CORE_UNLOCK(sc);
   3233 			break;
   3234 		}
   3235 		WM_CORE_UNLOCK(sc);
   3236 		/*FALLTHROUGH*/
   3237 	default:
   3238 #ifdef WM_MPSAFE
   3239 		s = splnet();
   3240 #endif
   3241 		/* It may call wm_start, so unlock here */
   3242 		error = ether_ioctl(ifp, cmd, data);
   3243 #ifdef WM_MPSAFE
   3244 		splx(s);
   3245 #endif
   3246 		if (error != ENETRESET)
   3247 			break;
   3248 
   3249 		error = 0;
   3250 
   3251 		if (cmd == SIOCSIFCAP) {
   3252 			error = (*ifp->if_init)(ifp);
   3253 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3254 			;
   3255 		else if (ifp->if_flags & IFF_RUNNING) {
   3256 			/*
   3257 			 * Multicast list has changed; set the hardware filter
   3258 			 * accordingly.
   3259 			 */
   3260 			WM_CORE_LOCK(sc);
   3261 			wm_set_filter(sc);
   3262 			WM_CORE_UNLOCK(sc);
   3263 		}
   3264 		break;
   3265 	}
   3266 
   3267 #ifndef WM_MPSAFE
   3268 	splx(s);
   3269 #endif
   3270 	return error;
   3271 }
   3272 
   3273 /* MAC address related */
   3274 
   3275 /*
   3276  * Get the offset of the MAC address and return it.
   3277  * If an error occurred, use offset 0.
   3278  */
   3279 static uint16_t
   3280 wm_check_alt_mac_addr(struct wm_softc *sc)
   3281 {
   3282 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3283 	uint16_t offset = NVM_OFF_MACADDR;
   3284 
   3285 	/* Try to read alternative MAC address pointer */
   3286 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3287 		return 0;
   3288 
   3289 	/* Check whether the pointer is valid. */
   3290 	if ((offset == 0x0000) || (offset == 0xffff))
   3291 		return 0;
   3292 
   3293 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3294 	/*
   3295 	 * Check whether the alternative MAC address is valid.  Some
   3296 	 * cards have a non-0xffff pointer but don't actually use an
   3297 	 * alternative MAC address.
   3298 	 *
   3299 	 * A valid station address must have the multicast (I/G) bit clear.
   3300 	 */
   3301 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3302 		if (((myea[0] & 0xff) & 0x01) == 0)
   3303 			return offset; /* Found */
   3304 
   3305 	/* Not found */
   3306 	return 0;
   3307 }
   3308 
   3309 static int
   3310 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3311 {
   3312 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3313 	uint16_t offset = NVM_OFF_MACADDR;
   3314 	int do_invert = 0;
   3315 
   3316 	switch (sc->sc_type) {
   3317 	case WM_T_82580:
   3318 	case WM_T_I350:
   3319 	case WM_T_I354:
   3320 		/* EEPROM Top Level Partitioning */
   3321 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3322 		break;
   3323 	case WM_T_82571:
   3324 	case WM_T_82575:
   3325 	case WM_T_82576:
   3326 	case WM_T_80003:
   3327 	case WM_T_I210:
   3328 	case WM_T_I211:
   3329 		offset = wm_check_alt_mac_addr(sc);
   3330 		if (offset == 0)
   3331 			if ((sc->sc_funcid & 0x01) == 1)
   3332 				do_invert = 1;
   3333 		break;
   3334 	default:
   3335 		if ((sc->sc_funcid & 0x01) == 1)
   3336 			do_invert = 1;
   3337 		break;
   3338 	}
   3339 
   3340 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3341 		goto bad;
   3342 
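	/*
	 * The NVM words hold the address in little-endian byte order,
	 * e.g. myea[0] == 0x2211 yields enaddr[0] == 0x11 and
	 * enaddr[1] == 0x22.
	 */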
   3343 	enaddr[0] = myea[0] & 0xff;
   3344 	enaddr[1] = myea[0] >> 8;
   3345 	enaddr[2] = myea[1] & 0xff;
   3346 	enaddr[3] = myea[1] >> 8;
   3347 	enaddr[4] = myea[2] & 0xff;
   3348 	enaddr[5] = myea[2] >> 8;
   3349 
   3350 	/*
   3351 	 * Toggle the LSB of the MAC address on the second port
   3352 	 * of some dual port cards.
   3353 	 */
   3354 	if (do_invert != 0)
   3355 		enaddr[5] ^= 1;
   3356 
   3357 	return 0;
   3358 
   3359  bad:
   3360 	return -1;
   3361 }
   3362 
   3363 /*
   3364  * wm_set_ral:
   3365  *
   3366  *	Set an entry in the receive address list.
   3367  */
   3368 static void
   3369 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3370 {
   3371 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3372 	uint32_t wlock_mac;
   3373 	int rv;
   3374 
   3375 	if (enaddr != NULL) {
   3376 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3377 		    (enaddr[3] << 24);
   3378 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3379 		ral_hi |= RAL_AV;
   3380 	} else {
   3381 		ral_lo = 0;
   3382 		ral_hi = 0;
   3383 	}
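	/*
	 * Example: for 00:11:22:33:44:55, ral_lo is 0x33221100 and
	 * ral_hi is 0x00005544 | RAL_AV (the address-valid bit).
	 */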
   3384 
   3385 	switch (sc->sc_type) {
   3386 	case WM_T_82542_2_0:
   3387 	case WM_T_82542_2_1:
   3388 	case WM_T_82543:
   3389 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3390 		CSR_WRITE_FLUSH(sc);
   3391 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3392 		CSR_WRITE_FLUSH(sc);
   3393 		break;
   3394 	case WM_T_PCH2:
   3395 	case WM_T_PCH_LPT:
   3396 	case WM_T_PCH_SPT:
   3397 	case WM_T_PCH_CNP:
   3398 		if (idx == 0) {
   3399 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3400 			CSR_WRITE_FLUSH(sc);
   3401 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3402 			CSR_WRITE_FLUSH(sc);
   3403 			return;
   3404 		}
   3405 		if (sc->sc_type != WM_T_PCH2) {
   3406 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3407 			    FWSM_WLOCK_MAC);
   3408 			addrl = WMREG_SHRAL(idx - 1);
   3409 			addrh = WMREG_SHRAH(idx - 1);
   3410 		} else {
   3411 			wlock_mac = 0;
   3412 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3413 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3414 		}
   3415 
   3416 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3417 			rv = wm_get_swflag_ich8lan(sc);
   3418 			if (rv != 0)
   3419 				return;
   3420 			CSR_WRITE(sc, addrl, ral_lo);
   3421 			CSR_WRITE_FLUSH(sc);
   3422 			CSR_WRITE(sc, addrh, ral_hi);
   3423 			CSR_WRITE_FLUSH(sc);
   3424 			wm_put_swflag_ich8lan(sc);
   3425 		}
   3426 
   3427 		break;
   3428 	default:
   3429 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3430 		CSR_WRITE_FLUSH(sc);
   3431 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3432 		CSR_WRITE_FLUSH(sc);
   3433 		break;
   3434 	}
   3435 }
   3436 
   3437 /*
   3438  * wm_mchash:
   3439  *
   3440  *	Compute the hash of the multicast address for the 4096-bit
   3441  *	multicast filter.
   3442  */
   3443 static uint32_t
   3444 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3445 {
   3446 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3447 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3448 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3449 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3450 	uint32_t hash;
   3451 
   3452 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3453 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3454 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3455 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3456 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3457 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3458 		return (hash & 0x3ff);
   3459 	}
   3460 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3461 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3462 
   3463 	return (hash & 0xfff);
   3464 }
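
/*
 * Worked example (a sketch, assuming sc_mchash_type == 0): for the
 * multicast address 01:00:5e:00:00:01, enaddr[4] is 0x00 and enaddr[5]
 * is 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x10, which selects
 * MTA word 0 (hash >> 5) and bit 16 (hash & 0x1f) in wm_set_filter().
 */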
   3465 
   3466 /*
   3467  * wm_set_filter:
   3468  *
   3469  *	Set up the receive filter.
   3470  */
   3471 static void
   3472 wm_set_filter(struct wm_softc *sc)
   3473 {
   3474 	struct ethercom *ec = &sc->sc_ethercom;
   3475 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3476 	struct ether_multi *enm;
   3477 	struct ether_multistep step;
   3478 	bus_addr_t mta_reg;
   3479 	uint32_t hash, reg, bit;
   3480 	int i, size, ralmax;
   3481 
   3482 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3483 		device_xname(sc->sc_dev), __func__));
   3484 
   3485 	if (sc->sc_type >= WM_T_82544)
   3486 		mta_reg = WMREG_CORDOVA_MTA;
   3487 	else
   3488 		mta_reg = WMREG_MTA;
   3489 
   3490 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3491 
   3492 	if (ifp->if_flags & IFF_BROADCAST)
   3493 		sc->sc_rctl |= RCTL_BAM;
   3494 	if (ifp->if_flags & IFF_PROMISC) {
   3495 		sc->sc_rctl |= RCTL_UPE;
   3496 		goto allmulti;
   3497 	}
   3498 
   3499 	/*
   3500 	 * Set the station address in the first RAL slot, and
   3501 	 * clear the remaining slots.
   3502 	 */
   3503 	if (sc->sc_type == WM_T_ICH8)
   3504 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3505 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3506 	    || (sc->sc_type == WM_T_PCH))
   3507 		size = WM_RAL_TABSIZE_ICH8;
   3508 	else if (sc->sc_type == WM_T_PCH2)
   3509 		size = WM_RAL_TABSIZE_PCH2;
   3510 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3511 	    || (sc->sc_type == WM_T_PCH_CNP))
   3512 		size = WM_RAL_TABSIZE_PCH_LPT;
   3513 	else if (sc->sc_type == WM_T_82575)
   3514 		size = WM_RAL_TABSIZE_82575;
   3515 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3516 		size = WM_RAL_TABSIZE_82576;
   3517 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3518 		size = WM_RAL_TABSIZE_I350;
   3519 	else
   3520 		size = WM_RAL_TABSIZE;
   3521 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3522 
   3523 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3524 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3525 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3526 		switch (i) {
   3527 		case 0:
   3528 			/* We can use all entries */
   3529 			ralmax = size;
   3530 			break;
   3531 		case 1:
   3532 			/* Only RAR[0] */
   3533 			ralmax = 1;
   3534 			break;
   3535 		default:
   3536 			/* available SHRA + RAR[0] */
   3537 			ralmax = i + 1;
   3538 		}
   3539 	} else
   3540 		ralmax = size;
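	/*
	 * Only clear the entries we are allowed to write; entries at or
	 * above ralmax are write-protected via FWSM_WLOCK_MAC.
	 */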
   3541 	for (i = 1; i < size; i++) {
   3542 		if (i < ralmax)
   3543 			wm_set_ral(sc, NULL, i);
   3544 	}
   3545 
   3546 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3547 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3548 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3549 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3550 		size = WM_ICH8_MC_TABSIZE;
   3551 	else
   3552 		size = WM_MC_TABSIZE;
   3553 	/* Clear out the multicast table. */
   3554 	for (i = 0; i < size; i++) {
   3555 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3556 		CSR_WRITE_FLUSH(sc);
   3557 	}
   3558 
   3559 	ETHER_LOCK(ec);
   3560 	ETHER_FIRST_MULTI(step, ec, enm);
   3561 	while (enm != NULL) {
   3562 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3563 			ETHER_UNLOCK(ec);
   3564 			/*
   3565 			 * We must listen to a range of multicast addresses.
   3566 			 * For now, just accept all multicasts, rather than
   3567 			 * trying to set only those filter bits needed to match
   3568 			 * the range.  (At this time, the only use of address
   3569 			 * ranges is for IP multicast routing, for which the
   3570 			 * range is big enough to require all bits set.)
   3571 			 */
   3572 			goto allmulti;
   3573 		}
   3574 
   3575 		hash = wm_mchash(sc, enm->enm_addrlo);
   3576 
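		/*
		 * The upper hash bits select a 32-bit word in the MTA;
		 * the low five bits select the bit within that word.
		 */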
   3577 		reg = (hash >> 5);
   3578 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3579 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3580 		    || (sc->sc_type == WM_T_PCH2)
   3581 		    || (sc->sc_type == WM_T_PCH_LPT)
   3582 		    || (sc->sc_type == WM_T_PCH_SPT)
   3583 		    || (sc->sc_type == WM_T_PCH_CNP))
   3584 			reg &= 0x1f;
   3585 		else
   3586 			reg &= 0x7f;
   3587 		bit = hash & 0x1f;
   3588 
   3589 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3590 		hash |= 1U << bit;
   3591 
   3592 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3593 			/*
   3594 			 * 82544 Errata 9: Certain registers cannot be
   3595 			 * written with particular alignments in PCI-X
   3596 			 * bus operation (FCAH, MTA and VFTA).
   3597 			 */
   3598 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3599 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3600 			CSR_WRITE_FLUSH(sc);
   3601 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3602 			CSR_WRITE_FLUSH(sc);
   3603 		} else {
   3604 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3605 			CSR_WRITE_FLUSH(sc);
   3606 		}
   3607 
   3608 		ETHER_NEXT_MULTI(step, enm);
   3609 	}
   3610 	ETHER_UNLOCK(ec);
   3611 
   3612 	ifp->if_flags &= ~IFF_ALLMULTI;
   3613 	goto setit;
   3614 
   3615  allmulti:
   3616 	ifp->if_flags |= IFF_ALLMULTI;
   3617 	sc->sc_rctl |= RCTL_MPE;
   3618 
   3619  setit:
   3620 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3621 }
   3622 
   3623 /* Reset and init related */
   3624 
   3625 static void
   3626 wm_set_vlan(struct wm_softc *sc)
   3627 {
   3628 
   3629 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3630 		device_xname(sc->sc_dev), __func__));
   3631 
   3632 	/* Deal with VLAN enables. */
   3633 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3634 		sc->sc_ctrl |= CTRL_VME;
   3635 	else
   3636 		sc->sc_ctrl &= ~CTRL_VME;
   3637 
   3638 	/* Write the control registers. */
   3639 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3640 }
   3641 
   3642 static void
   3643 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3644 {
   3645 	uint32_t gcr;
   3646 	pcireg_t ctrl2;
   3647 
   3648 	gcr = CSR_READ(sc, WMREG_GCR);
   3649 
   3650 	/* Only take action if the timeout value is still its default of 0 */
   3651 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3652 		goto out;
   3653 
   3654 	if ((gcr & GCR_CAP_VER2) == 0) {
   3655 		gcr |= GCR_CMPL_TMOUT_10MS;
   3656 		goto out;
   3657 	}
   3658 
   3659 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3660 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3661 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3662 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3663 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3664 
   3665 out:
   3666 	/* Disable completion timeout resend */
   3667 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3668 
   3669 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3670 }
   3671 
   3672 void
   3673 wm_get_auto_rd_done(struct wm_softc *sc)
   3674 {
   3675 	int i;
   3676 
   3677 	/* Wait for eeprom to reload */
   3678 	switch (sc->sc_type) {
   3679 	case WM_T_82571:
   3680 	case WM_T_82572:
   3681 	case WM_T_82573:
   3682 	case WM_T_82574:
   3683 	case WM_T_82583:
   3684 	case WM_T_82575:
   3685 	case WM_T_82576:
   3686 	case WM_T_82580:
   3687 	case WM_T_I350:
   3688 	case WM_T_I354:
   3689 	case WM_T_I210:
   3690 	case WM_T_I211:
   3691 	case WM_T_80003:
   3692 	case WM_T_ICH8:
   3693 	case WM_T_ICH9:
   3694 		for (i = 0; i < 10; i++) {
   3695 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3696 				break;
   3697 			delay(1000);
   3698 		}
   3699 		if (i == 10) {
   3700 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3701 			    "complete\n", device_xname(sc->sc_dev));
   3702 		}
   3703 		break;
   3704 	default:
   3705 		break;
   3706 	}
   3707 }
   3708 
   3709 void
   3710 wm_lan_init_done(struct wm_softc *sc)
   3711 {
   3712 	uint32_t reg = 0;
   3713 	int i;
   3714 
   3715 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3716 		device_xname(sc->sc_dev), __func__));
   3717 
   3718 	/* Wait for eeprom to reload */
   3719 	switch (sc->sc_type) {
   3720 	case WM_T_ICH10:
   3721 	case WM_T_PCH:
   3722 	case WM_T_PCH2:
   3723 	case WM_T_PCH_LPT:
   3724 	case WM_T_PCH_SPT:
   3725 	case WM_T_PCH_CNP:
   3726 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3727 			reg = CSR_READ(sc, WMREG_STATUS);
   3728 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3729 				break;
   3730 			delay(100);
   3731 		}
   3732 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3733 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3734 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3735 		}
   3736 		break;
   3737 	default:
   3738 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3739 		    __func__);
   3740 		break;
   3741 	}
   3742 
   3743 	reg &= ~STATUS_LAN_INIT_DONE;
   3744 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3745 }
   3746 
   3747 void
   3748 wm_get_cfg_done(struct wm_softc *sc)
   3749 {
   3750 	int mask;
   3751 	uint32_t reg;
   3752 	int i;
   3753 
   3754 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3755 		device_xname(sc->sc_dev), __func__));
   3756 
   3757 	/* Wait for eeprom to reload */
   3758 	switch (sc->sc_type) {
   3759 	case WM_T_82542_2_0:
   3760 	case WM_T_82542_2_1:
   3761 		/* null */
   3762 		break;
   3763 	case WM_T_82543:
   3764 	case WM_T_82544:
   3765 	case WM_T_82540:
   3766 	case WM_T_82545:
   3767 	case WM_T_82545_3:
   3768 	case WM_T_82546:
   3769 	case WM_T_82546_3:
   3770 	case WM_T_82541:
   3771 	case WM_T_82541_2:
   3772 	case WM_T_82547:
   3773 	case WM_T_82547_2:
   3774 	case WM_T_82573:
   3775 	case WM_T_82574:
   3776 	case WM_T_82583:
   3777 		/* generic */
   3778 		delay(10*1000);
   3779 		break;
   3780 	case WM_T_80003:
   3781 	case WM_T_82571:
   3782 	case WM_T_82572:
   3783 	case WM_T_82575:
   3784 	case WM_T_82576:
   3785 	case WM_T_82580:
   3786 	case WM_T_I350:
   3787 	case WM_T_I354:
   3788 	case WM_T_I210:
   3789 	case WM_T_I211:
   3790 		if (sc->sc_type == WM_T_82571) {
   3791 			/* On the 82571, all functions share port 0's bit */
   3792 			mask = EEMNGCTL_CFGDONE_0;
   3793 		} else
   3794 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
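		/*
		 * Each LAN function has its own CFGDONE bit in EEMNGCTL,
		 * offset by the function id.
		 */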
   3795 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3796 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3797 				break;
   3798 			delay(1000);
   3799 		}
   3800 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3801 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3802 				device_xname(sc->sc_dev), __func__));
   3803 		}
   3804 		break;
   3805 	case WM_T_ICH8:
   3806 	case WM_T_ICH9:
   3807 	case WM_T_ICH10:
   3808 	case WM_T_PCH:
   3809 	case WM_T_PCH2:
   3810 	case WM_T_PCH_LPT:
   3811 	case WM_T_PCH_SPT:
   3812 	case WM_T_PCH_CNP:
   3813 		delay(10*1000);
   3814 		if (sc->sc_type >= WM_T_ICH10)
   3815 			wm_lan_init_done(sc);
   3816 		else
   3817 			wm_get_auto_rd_done(sc);
   3818 
   3819 		reg = CSR_READ(sc, WMREG_STATUS);
   3820 		if ((reg & STATUS_PHYRA) != 0)
   3821 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3822 		break;
   3823 	default:
   3824 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3825 		    __func__);
   3826 		break;
   3827 	}
   3828 }
   3829 
   3830 void
   3831 wm_phy_post_reset(struct wm_softc *sc)
   3832 {
   3833 	uint32_t reg;
   3834 
   3835 	/* This function is only for ICH8 and newer. */
   3836 	if (sc->sc_type < WM_T_ICH8)
   3837 		return;
   3838 
   3839 	if (wm_phy_resetisblocked(sc)) {
   3840 		/* XXX */
   3841 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3842 		return;
   3843 	}
   3844 
   3845 	/* Allow time for h/w to get to quiescent state after reset */
   3846 	delay(10*1000);
   3847 
   3848 	/* Perform any necessary post-reset workarounds */
   3849 	if (sc->sc_type == WM_T_PCH)
   3850 		wm_hv_phy_workaround_ich8lan(sc);
   3851 	if (sc->sc_type == WM_T_PCH2)
   3852 		wm_lv_phy_workaround_ich8lan(sc);
   3853 
   3854 	/* Clear the host wakeup bit after lcd reset */
   3855 	if (sc->sc_type >= WM_T_PCH) {
   3856 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3857 		    BM_PORT_GEN_CFG);
   3858 		reg &= ~BM_WUC_HOST_WU_BIT;
   3859 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3860 		    BM_PORT_GEN_CFG, reg);
   3861 	}
   3862 
   3863 	/* Configure the LCD with the extended configuration region in NVM */
   3864 	wm_init_lcd_from_nvm(sc);
   3865 
   3866 	/* Configure the LCD with the OEM bits in NVM */
   3867 }
   3868 
   3869 /* Only for PCH and newer */
   3870 static void
   3871 wm_write_smbus_addr(struct wm_softc *sc)
   3872 {
   3873 	uint32_t strap, freq;
   3874 	uint32_t phy_data;
   3875 
   3876 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3877 		device_xname(sc->sc_dev), __func__));
   3878 
   3879 	strap = CSR_READ(sc, WMREG_STRAP);
   3880 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3881 
   3882 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3883 
   3884 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3885 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3886 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3887 
   3888 	if (sc->sc_phytype == WMPHY_I217) {
   3889 		/* Restore SMBus frequency */
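		/*
		 * A strap value of 0 means an unsupported frequency;
		 * otherwise bits 0 and 1 of (freq - 1) set the PHY's
		 * low and high frequency bits respectively.
		 */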
   3890 		if (freq--) {
   3891 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3892 			    | HV_SMB_ADDR_FREQ_HIGH);
   3893 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3894 			    HV_SMB_ADDR_FREQ_LOW);
   3895 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3896 			    HV_SMB_ADDR_FREQ_HIGH);
   3897 		} else {
   3898 			DPRINTF(WM_DEBUG_INIT,
   3899 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3900 				device_xname(sc->sc_dev), __func__));
   3901 		}
   3902 	}
   3903 
   3904 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3905 }
   3906 
   3907 void
   3908 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3909 {
   3910 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3911 	uint16_t phy_page = 0;
   3912 
   3913 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3914 		device_xname(sc->sc_dev), __func__));
   3915 
   3916 	switch (sc->sc_type) {
   3917 	case WM_T_ICH8:
   3918 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3919 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3920 			return;
   3921 
   3922 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3923 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3924 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3925 			break;
   3926 		}
   3927 		/* FALLTHROUGH */
   3928 	case WM_T_PCH:
   3929 	case WM_T_PCH2:
   3930 	case WM_T_PCH_LPT:
   3931 	case WM_T_PCH_SPT:
   3932 	case WM_T_PCH_CNP:
   3933 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3934 		break;
   3935 	default:
   3936 		return;
   3937 	}
   3938 
   3939 	sc->phy.acquire(sc);
   3940 
   3941 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3942 	if ((reg & sw_cfg_mask) == 0)
   3943 		goto release;
   3944 
   3945 	/*
   3946 	 * Make sure HW does not configure LCD from PHY extended configuration
   3947 	 * before SW configuration
   3948 	 */
   3949 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3950 	if ((sc->sc_type < WM_T_PCH2)
   3951 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3952 		goto release;
   3953 
   3954 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3955 		device_xname(sc->sc_dev), __func__));
   3956 	/* word_addr is in DWORD */
   3957 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3958 
   3959 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3960 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3961 	if (cnf_size == 0)
   3962 		goto release;
   3963 
   3964 	if (((sc->sc_type == WM_T_PCH)
   3965 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3966 	    || (sc->sc_type > WM_T_PCH)) {
   3967 		/*
   3968 		 * HW configures the SMBus address and LEDs when the OEM and
   3969 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3970 		 * are cleared, SW will configure them instead.
   3971 		 */
   3972 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3973 			device_xname(sc->sc_dev), __func__));
   3974 		wm_write_smbus_addr(sc);
   3975 
   3976 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3977 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3978 	}
   3979 
   3980 	/* Configure LCD from extended configuration region. */
   3981 	for (i = 0; i < cnf_size; i++) {
   3982 		uint16_t reg_data, reg_addr;
   3983 
   3984 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3985 			goto release;
   3986 
   3987 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   3988 			goto release;
   3989 
   3990 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3991 			phy_page = reg_data;
   3992 
   3993 		reg_addr &= IGPHY_MAXREGADDR;
   3994 		reg_addr |= phy_page;
   3995 
   3996 		sc->phy.release(sc); /* XXX */
   3997 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3998 		sc->phy.acquire(sc); /* XXX */
   3999 	}
   4000 
   4001 release:
   4002 	sc->phy.release(sc);
   4003 	return;
   4004 }
   4005 
   4006 
   4007 /* Init hardware bits */
   4008 void
   4009 wm_initialize_hardware_bits(struct wm_softc *sc)
   4010 {
   4011 	uint32_t tarc0, tarc1, reg;
   4012 
   4013 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4014 		device_xname(sc->sc_dev), __func__));
   4015 
   4016 	/* For 82571 variant, 80003 and ICHs */
   4017 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4018 	    || (sc->sc_type >= WM_T_80003)) {
   4019 
   4020 		/* Transmit Descriptor Control 0 */
   4021 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4022 		reg |= TXDCTL_COUNT_DESC;
   4023 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4024 
   4025 		/* Transmit Descriptor Control 1 */
   4026 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4027 		reg |= TXDCTL_COUNT_DESC;
   4028 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4029 
   4030 		/* TARC0 */
   4031 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4032 		switch (sc->sc_type) {
   4033 		case WM_T_82571:
   4034 		case WM_T_82572:
   4035 		case WM_T_82573:
   4036 		case WM_T_82574:
   4037 		case WM_T_82583:
   4038 		case WM_T_80003:
   4039 			/* Clear bits 30..27 */
   4040 			tarc0 &= ~__BITS(30, 27);
   4041 			break;
   4042 		default:
   4043 			break;
   4044 		}
   4045 
   4046 		switch (sc->sc_type) {
   4047 		case WM_T_82571:
   4048 		case WM_T_82572:
   4049 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4050 
   4051 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4052 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4053 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4054 			/* 8257[12] Errata No.7 */
   4055 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4056 
   4057 			/* TARC1 bit 28 */
   4058 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4059 				tarc1 &= ~__BIT(28);
   4060 			else
   4061 				tarc1 |= __BIT(28);
   4062 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4063 
   4064 			/*
   4065 			 * 8257[12] Errata No.13
   4066 			 * Disable Dynamic Clock Gating.
   4067 			 */
   4068 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4069 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4070 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4071 			break;
   4072 		case WM_T_82573:
   4073 		case WM_T_82574:
   4074 		case WM_T_82583:
   4075 			if ((sc->sc_type == WM_T_82574)
   4076 			    || (sc->sc_type == WM_T_82583))
   4077 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4078 
   4079 			/* Extended Device Control */
   4080 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4081 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4082 			reg |= __BIT(22);	/* Set bit 22 */
   4083 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4084 
   4085 			/* Device Control */
   4086 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4087 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4088 
   4089 			/* PCIe Control Register */
   4090 			/*
   4091 			 * 82573 Errata (unknown).
   4092 			 *
   4093 			 * 82574 Errata 25 and 82583 Errata 12
   4094 			 * "Dropped Rx Packets":
   4095 			 *   NVM Image Version 2.1.4 and newer don't have this bug.
   4096 			 */
   4097 			reg = CSR_READ(sc, WMREG_GCR);
   4098 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4099 			CSR_WRITE(sc, WMREG_GCR, reg);
   4100 
   4101 			if ((sc->sc_type == WM_T_82574)
   4102 			    || (sc->sc_type == WM_T_82583)) {
   4103 				/*
   4104 				 * Document says this bit must be set for
   4105 				 * proper operation.
   4106 				 */
   4107 				reg = CSR_READ(sc, WMREG_GCR);
   4108 				reg |= __BIT(22);
   4109 				CSR_WRITE(sc, WMREG_GCR, reg);
   4110 
   4111 				/*
   4112 				 * Apply a workaround for the hardware errata
   4113 				 * documented in the errata docs.  It fixes an
   4114 				 * issue where some error prone or unreliable
   4115 				 * PCIe completions occur, particularly with
   4116 				 * ASPM enabled.  Without the fix, the issue
   4117 				 * can cause Tx timeouts.
   4118 				 */
   4119 				reg = CSR_READ(sc, WMREG_GCR2);
   4120 				reg |= __BIT(0);
   4121 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4122 			}
   4123 			break;
   4124 		case WM_T_80003:
   4125 			/* TARC0 */
   4126 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4127 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4128 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4129 
   4130 			/* TARC1 bit 28 */
   4131 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4132 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4133 				tarc1 &= ~__BIT(28);
   4134 			else
   4135 				tarc1 |= __BIT(28);
   4136 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4137 			break;
   4138 		case WM_T_ICH8:
   4139 		case WM_T_ICH9:
   4140 		case WM_T_ICH10:
   4141 		case WM_T_PCH:
   4142 		case WM_T_PCH2:
   4143 		case WM_T_PCH_LPT:
   4144 		case WM_T_PCH_SPT:
   4145 		case WM_T_PCH_CNP:
   4146 			/* TARC0 */
   4147 			if (sc->sc_type == WM_T_ICH8) {
   4148 				/* Set TARC0 bits 29 and 28 */
   4149 				tarc0 |= __BITS(29, 28);
   4150 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4151 				tarc0 |= __BIT(29);
   4152 				/*
   4153 				 * Drop bit 28.  From Linux; see the
   4154 				 * I218/I219 spec update, "5. Buffer
   4155 				 * Overrun While the I219 is Processing
   4156 				 * DMA Transactions".
   4157 				 */
   4158 				tarc0 &= ~__BIT(28);
   4159 			}
   4160 			/* Set TARC0 bits 23,24,26,27 */
   4161 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4162 
   4163 			/* CTRL_EXT */
   4164 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4165 			reg |= __BIT(22);	/* Set bit 22 */
   4166 			/*
   4167 			 * Enable PHY low-power state when MAC is at D3
   4168 			 * w/o WoL
   4169 			 */
   4170 			if (sc->sc_type >= WM_T_PCH)
   4171 				reg |= CTRL_EXT_PHYPDEN;
   4172 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4173 
   4174 			/* TARC1 */
   4175 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4176 			/* bit 28 */
   4177 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4178 				tarc1 &= ~__BIT(28);
   4179 			else
   4180 				tarc1 |= __BIT(28);
   4181 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4182 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4183 
   4184 			/* Device Status */
   4185 			if (sc->sc_type == WM_T_ICH8) {
   4186 				reg = CSR_READ(sc, WMREG_STATUS);
   4187 				reg &= ~__BIT(31);
   4188 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4189 
   4190 			}
   4191 
   4192 			/* IOSFPC */
   4193 			if (sc->sc_type == WM_T_PCH_SPT) {
   4194 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4195 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4196 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4197 			}
   4198 			/*
   4199 			 * To work around a descriptor data corruption issue
   4200 			 * seen with NFS v2 UDP traffic, just disable the
   4201 			 * NFS filtering capability.
   4202 			 */
   4203 			reg = CSR_READ(sc, WMREG_RFCTL);
   4204 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4205 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4206 			break;
   4207 		default:
   4208 			break;
   4209 		}
   4210 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4211 
   4212 		switch (sc->sc_type) {
   4213 		/*
   4214 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4215 		 * Avoid RSS Hash Value bug.
   4216 		 */
   4217 		case WM_T_82571:
   4218 		case WM_T_82572:
   4219 		case WM_T_82573:
   4220 		case WM_T_80003:
   4221 		case WM_T_ICH8:
   4222 			reg = CSR_READ(sc, WMREG_RFCTL);
   4223 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4224 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4225 			break;
   4226 		case WM_T_82574:
   4227 			/* Use extended Rx descriptors. */
   4228 			reg = CSR_READ(sc, WMREG_RFCTL);
   4229 			reg |= WMREG_RFCTL_EXSTEN;
   4230 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4231 			break;
   4232 		default:
   4233 			break;
   4234 		}
   4235 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4236 		/*
   4237 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4238 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4239 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4240 		 * Correctly by the Device"
   4241 		 *
   4242 		 * I354(C2000) Errata AVR53:
   4243 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4244 		 * Hang"
   4245 		 */
   4246 		reg = CSR_READ(sc, WMREG_RFCTL);
   4247 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4248 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4249 	}
   4250 }
   4251 
   4252 static uint32_t
   4253 wm_rxpbs_adjust_82580(uint32_t val)
   4254 {
   4255 	uint32_t rv = 0;
   4256 
   4257 	if (val < __arraycount(wm_82580_rxpbs_table))
   4258 		rv = wm_82580_rxpbs_table[val];
   4259 
   4260 	return rv;
   4261 }
   4262 
   4263 /*
   4264  * wm_reset_phy:
   4265  *
   4266  *	generic PHY reset function.
   4267  *	Same as e1000_phy_hw_reset_generic()
   4268  */
   4269 static void
   4270 wm_reset_phy(struct wm_softc *sc)
   4271 {
   4272 	uint32_t reg;
   4273 
   4274 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4275 		device_xname(sc->sc_dev), __func__));
   4276 	if (wm_phy_resetisblocked(sc))
   4277 		return;
   4278 
   4279 	sc->phy.acquire(sc);
   4280 
   4281 	reg = CSR_READ(sc, WMREG_CTRL);
   4282 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4283 	CSR_WRITE_FLUSH(sc);
   4284 
   4285 	delay(sc->phy.reset_delay_us);
   4286 
   4287 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4288 	CSR_WRITE_FLUSH(sc);
   4289 
   4290 	delay(150);
   4291 
   4292 	sc->phy.release(sc);
   4293 
   4294 	wm_get_cfg_done(sc);
   4295 	wm_phy_post_reset(sc);
   4296 }
   4297 
   4298 /*
   4299  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4300  * so it is enough to check sc->sc_queue[0] only.
   4301  */
   4302 static void
   4303 wm_flush_desc_rings(struct wm_softc *sc)
   4304 {
   4305 	pcireg_t preg;
   4306 	uint32_t reg;
   4307 	struct wm_txqueue *txq;
   4308 	wiseman_txdesc_t *txd;
   4309 	int nexttx;
   4310 	uint32_t rctl;
   4311 
   4312 	/* First, disable MULR fix in FEXTNVM11 */
   4313 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4314 	reg |= FEXTNVM11_DIS_MULRFIX;
   4315 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4316 
   4317 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4318 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4319 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4320 		return;
   4321 
   4322 	/* TX */
   4323 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4324 	    device_xname(sc->sc_dev), preg, reg);
   4325 	reg = CSR_READ(sc, WMREG_TCTL);
   4326 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4327 
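	/*
	 * Queue one dummy 512-byte descriptor and bump the tail pointer
	 * so the hardware drains the stuck transmit ring.
	 */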
   4328 	txq = &sc->sc_queue[0].wmq_txq;
   4329 	nexttx = txq->txq_next;
   4330 	txd = &txq->txq_descs[nexttx];
   4331 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4332 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4333 	txd->wtx_fields.wtxu_status = 0;
   4334 	txd->wtx_fields.wtxu_options = 0;
   4335 	txd->wtx_fields.wtxu_vlan = 0;
   4336 
   4337 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4338 	    BUS_SPACE_BARRIER_WRITE);
   4339 
   4340 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4341 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4342 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4343 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4344 	delay(250);
   4345 
   4346 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4347 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4348 		return;
   4349 
   4350 	/* RX */
   4351 	printf("%s: Need RX flush (reg = %08x)\n",
   4352 	    device_xname(sc->sc_dev), preg);
   4353 	rctl = CSR_READ(sc, WMREG_RCTL);
   4354 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4355 	CSR_WRITE_FLUSH(sc);
   4356 	delay(150);
   4357 
   4358 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4359 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4360 	reg &= 0xffffc000;
   4361 	/*
   4362 	 * update thresholds: prefetch threshold to 31, host threshold
   4363 	 * to 1 and make sure the granularity is "descriptors" and not
   4364 	 * "cache lines"
   4365 	 */
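	/* 0x1f: prefetch threshold = 31; 1 << 8: host threshold = 1 */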
   4366 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4367 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4368 
   4369 	/*
   4370 	 * momentarily enable the RX ring for the changes to take
   4371 	 * effect
   4372 	 */
   4373 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4374 	CSR_WRITE_FLUSH(sc);
   4375 	delay(150);
   4376 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4377 }
   4378 
   4379 /*
   4380  * wm_reset:
   4381  *
   4382  *	Reset the i82542 chip.
   4383  */
   4384 static void
   4385 wm_reset(struct wm_softc *sc)
   4386 {
   4387 	int phy_reset = 0;
   4388 	int i, error = 0;
   4389 	uint32_t reg;
   4390 	uint16_t kmreg;
   4391 	int rv;
   4392 
   4393 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4394 		device_xname(sc->sc_dev), __func__));
   4395 	KASSERT(sc->sc_type != 0);
   4396 
   4397 	/*
   4398 	 * Allocate on-chip memory according to the MTU size.
   4399 	 * The Packet Buffer Allocation register must be written
   4400 	 * before the chip is reset.
   4401 	 */
   4402 	switch (sc->sc_type) {
   4403 	case WM_T_82547:
   4404 	case WM_T_82547_2:
   4405 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4406 		    PBA_22K : PBA_30K;
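		/*
		 * The rest of the 40K packet buffer, above the Rx
		 * allocation, is used as the Tx FIFO.
		 */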
   4407 		for (i = 0; i < sc->sc_nqueues; i++) {
   4408 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4409 			txq->txq_fifo_head = 0;
   4410 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4411 			txq->txq_fifo_size =
   4412 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4413 			txq->txq_fifo_stall = 0;
   4414 		}
   4415 		break;
   4416 	case WM_T_82571:
   4417 	case WM_T_82572:
   4418 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4419 	case WM_T_80003:
   4420 		sc->sc_pba = PBA_32K;
   4421 		break;
   4422 	case WM_T_82573:
   4423 		sc->sc_pba = PBA_12K;
   4424 		break;
   4425 	case WM_T_82574:
   4426 	case WM_T_82583:
   4427 		sc->sc_pba = PBA_20K;
   4428 		break;
   4429 	case WM_T_82576:
   4430 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4431 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4432 		break;
   4433 	case WM_T_82580:
   4434 	case WM_T_I350:
   4435 	case WM_T_I354:
   4436 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4437 		break;
   4438 	case WM_T_I210:
   4439 	case WM_T_I211:
   4440 		sc->sc_pba = PBA_34K;
   4441 		break;
   4442 	case WM_T_ICH8:
   4443 		/* Workaround for a bit corruption issue in FIFO memory */
   4444 		sc->sc_pba = PBA_8K;
   4445 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4446 		break;
   4447 	case WM_T_ICH9:
   4448 	case WM_T_ICH10:
   4449 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4450 		    PBA_14K : PBA_10K;
   4451 		break;
   4452 	case WM_T_PCH:
   4453 	case WM_T_PCH2:	/* XXX 14K? */
   4454 	case WM_T_PCH_LPT:
   4455 	case WM_T_PCH_SPT:
   4456 	case WM_T_PCH_CNP:
   4457 		sc->sc_pba = PBA_26K;
   4458 		break;
   4459 	default:
   4460 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4461 		    PBA_40K : PBA_48K;
   4462 		break;
   4463 	}
   4464 	/*
   4465 	 * Only old or non-multiqueue devices have the PBA register.
   4466 	 * XXX Need special handling for 82575.
   4467 	 */
   4468 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4469 	    || (sc->sc_type == WM_T_82575))
   4470 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4471 
   4472 	/* Prevent the PCI-E bus from sticking */
   4473 	if (sc->sc_flags & WM_F_PCIE) {
   4474 		int timeout = 800;
   4475 
   4476 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4477 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4478 
   4479 		while (timeout--) {
   4480 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4481 			    == 0)
   4482 				break;
   4483 			delay(100);
   4484 		}
   4485 		if (timeout == 0)
   4486 			device_printf(sc->sc_dev,
   4487 			    "failed to disable busmastering\n");
   4488 	}
   4489 
   4490 	/* Set the completion timeout for interface */
   4491 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4492 	    || (sc->sc_type == WM_T_82580)
   4493 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4494 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4495 		wm_set_pcie_completion_timeout(sc);
   4496 
   4497 	/* Clear interrupt */
   4498 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4499 	if (wm_is_using_msix(sc)) {
   4500 		if (sc->sc_type != WM_T_82574) {
   4501 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4502 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4503 		} else {
   4504 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4505 		}
   4506 	}
   4507 
   4508 	/* Stop the transmit and receive processes. */
   4509 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4510 	sc->sc_rctl &= ~RCTL_EN;
   4511 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4512 	CSR_WRITE_FLUSH(sc);
   4513 
   4514 	/* XXX set_tbi_sbp_82543() */
   4515 
   4516 	delay(10*1000);
   4517 
   4518 	/* Must acquire the MDIO ownership before MAC reset */
   4519 	switch (sc->sc_type) {
   4520 	case WM_T_82573:
   4521 	case WM_T_82574:
   4522 	case WM_T_82583:
   4523 		error = wm_get_hw_semaphore_82573(sc);
   4524 		break;
   4525 	default:
   4526 		break;
   4527 	}
   4528 
   4529 	/*
   4530 	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description of the PHY_RST bit in the CTRL register
   4532 	 * in 8254x_GBe_SDM.pdf.
   4533 	 */
   4534 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4535 		CSR_WRITE(sc, WMREG_CTRL,
   4536 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4537 		CSR_WRITE_FLUSH(sc);
   4538 		delay(5000);
   4539 	}
   4540 
   4541 	switch (sc->sc_type) {
   4542 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4543 	case WM_T_82541:
   4544 	case WM_T_82541_2:
   4545 	case WM_T_82547:
   4546 	case WM_T_82547_2:
   4547 		/*
   4548 		 * On some chipsets, a reset through a memory-mapped write
   4549 		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes a major headache that can be
   4551 		 * avoided by issuing the reset via indirect register writes
   4552 		 * through I/O space.
   4553 		 *
   4554 		 * So, if we successfully mapped the I/O BAR at attach time,
   4555 		 * use that.  Otherwise, try our luck with a memory-mapped
   4556 		 * reset.
   4557 		 */
   4558 		if (sc->sc_flags & WM_F_IOH_VALID)
   4559 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4560 		else
   4561 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4562 		break;
   4563 	case WM_T_82545_3:
   4564 	case WM_T_82546_3:
   4565 		/* Use the shadow control register on these chips. */
   4566 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4567 		break;
   4568 	case WM_T_80003:
   4569 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4570 		sc->phy.acquire(sc);
   4571 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4572 		sc->phy.release(sc);
   4573 		break;
   4574 	case WM_T_ICH8:
   4575 	case WM_T_ICH9:
   4576 	case WM_T_ICH10:
   4577 	case WM_T_PCH:
   4578 	case WM_T_PCH2:
   4579 	case WM_T_PCH_LPT:
   4580 	case WM_T_PCH_SPT:
   4581 	case WM_T_PCH_CNP:
   4582 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4583 		if (wm_phy_resetisblocked(sc) == false) {
   4584 			/*
   4585 			 * Gate automatic PHY configuration by hardware on
   4586 			 * non-managed 82579
   4587 			 */
   4588 			if ((sc->sc_type == WM_T_PCH2)
   4589 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4590 				== 0))
   4591 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4592 
   4593 			reg |= CTRL_PHY_RESET;
   4594 			phy_reset = 1;
   4595 		} else
   4596 			printf("XXX reset is blocked!!!\n");
   4597 		sc->phy.acquire(sc);
   4598 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier when resetting */
   4600 		delay(20*1000);
   4601 		mutex_exit(sc->sc_ich_phymtx);
   4602 		break;
   4603 	case WM_T_82580:
   4604 	case WM_T_I350:
   4605 	case WM_T_I354:
   4606 	case WM_T_I210:
   4607 	case WM_T_I211:
   4608 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4609 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4610 			CSR_WRITE_FLUSH(sc);
   4611 		delay(5000);
   4612 		break;
   4613 	case WM_T_82542_2_0:
   4614 	case WM_T_82542_2_1:
   4615 	case WM_T_82543:
   4616 	case WM_T_82540:
   4617 	case WM_T_82545:
   4618 	case WM_T_82546:
   4619 	case WM_T_82571:
   4620 	case WM_T_82572:
   4621 	case WM_T_82573:
   4622 	case WM_T_82574:
   4623 	case WM_T_82575:
   4624 	case WM_T_82576:
   4625 	case WM_T_82583:
   4626 	default:
   4627 		/* Everything else can safely use the documented method. */
   4628 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4629 		break;
   4630 	}
   4631 
   4632 	/* Must release the MDIO ownership after MAC reset */
   4633 	switch (sc->sc_type) {
   4634 	case WM_T_82573:
   4635 	case WM_T_82574:
   4636 	case WM_T_82583:
   4637 		if (error == 0)
   4638 			wm_put_hw_semaphore_82573(sc);
   4639 		break;
   4640 	default:
   4641 		break;
   4642 	}
   4643 
   4644 	if (phy_reset != 0)
   4645 		wm_get_cfg_done(sc);
   4646 
   4647 	/* reload EEPROM */
   4648 	switch (sc->sc_type) {
   4649 	case WM_T_82542_2_0:
   4650 	case WM_T_82542_2_1:
   4651 	case WM_T_82543:
   4652 	case WM_T_82544:
   4653 		delay(10);
   4654 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4655 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4656 		CSR_WRITE_FLUSH(sc);
   4657 		delay(2000);
   4658 		break;
   4659 	case WM_T_82540:
   4660 	case WM_T_82545:
   4661 	case WM_T_82545_3:
   4662 	case WM_T_82546:
   4663 	case WM_T_82546_3:
   4664 		delay(5*1000);
   4665 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4666 		break;
   4667 	case WM_T_82541:
   4668 	case WM_T_82541_2:
   4669 	case WM_T_82547:
   4670 	case WM_T_82547_2:
   4671 		delay(20000);
   4672 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4673 		break;
   4674 	case WM_T_82571:
   4675 	case WM_T_82572:
   4676 	case WM_T_82573:
   4677 	case WM_T_82574:
   4678 	case WM_T_82583:
   4679 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4680 			delay(10);
   4681 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4682 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4683 			CSR_WRITE_FLUSH(sc);
   4684 		}
   4685 		/* check EECD_EE_AUTORD */
   4686 		wm_get_auto_rd_done(sc);
   4687 		/*
		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   4689 		 * is set.
   4690 		 */
   4691 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4692 		    || (sc->sc_type == WM_T_82583))
   4693 			delay(25*1000);
   4694 		break;
   4695 	case WM_T_82575:
   4696 	case WM_T_82576:
   4697 	case WM_T_82580:
   4698 	case WM_T_I350:
   4699 	case WM_T_I354:
   4700 	case WM_T_I210:
   4701 	case WM_T_I211:
   4702 	case WM_T_80003:
   4703 		/* check EECD_EE_AUTORD */
   4704 		wm_get_auto_rd_done(sc);
   4705 		break;
   4706 	case WM_T_ICH8:
   4707 	case WM_T_ICH9:
   4708 	case WM_T_ICH10:
   4709 	case WM_T_PCH:
   4710 	case WM_T_PCH2:
   4711 	case WM_T_PCH_LPT:
   4712 	case WM_T_PCH_SPT:
   4713 	case WM_T_PCH_CNP:
   4714 		break;
   4715 	default:
   4716 		panic("%s: unknown type\n", __func__);
   4717 	}
   4718 
   4719 	/* Check whether EEPROM is present or not */
   4720 	switch (sc->sc_type) {
   4721 	case WM_T_82575:
   4722 	case WM_T_82576:
   4723 	case WM_T_82580:
   4724 	case WM_T_I350:
   4725 	case WM_T_I354:
   4726 	case WM_T_ICH8:
   4727 	case WM_T_ICH9:
   4728 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4729 			/* Not found */
   4730 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4731 			if (sc->sc_type == WM_T_82575)
   4732 				wm_reset_init_script_82575(sc);
   4733 		}
   4734 		break;
   4735 	default:
   4736 		break;
   4737 	}
   4738 
   4739 	if (phy_reset != 0)
   4740 		wm_phy_post_reset(sc);
   4741 
   4742 	if ((sc->sc_type == WM_T_82580)
   4743 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4744 		/* clear global device reset status bit */
   4745 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4746 	}
   4747 
   4748 	/* Clear any pending interrupt events. */
   4749 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4750 	reg = CSR_READ(sc, WMREG_ICR);
   4751 	if (wm_is_using_msix(sc)) {
   4752 		if (sc->sc_type != WM_T_82574) {
   4753 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4754 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4755 		} else
   4756 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4757 	}
   4758 
   4759 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4760 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4761 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4762 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4763 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4764 		reg |= KABGTXD_BGSQLBIAS;
   4765 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4766 	}
   4767 
   4768 	/* reload sc_ctrl */
   4769 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4770 
   4771 	if (sc->sc_type == WM_T_I354) {
   4772 #if 0
   4773 		/* I354 uses an external PHY */
   4774 		wm_set_eee_i354(sc);
   4775 #endif
   4776 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4777 		wm_set_eee_i350(sc);
   4778 
   4779 	/*
   4780 	 * For PCH, this write will make sure that any noise will be detected
   4781 	 * as a CRC error and be dropped rather than show up as a bad packet
	 * to the DMA engine.
   4783 	 */
   4784 	if (sc->sc_type == WM_T_PCH)
   4785 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4786 
   4787 	if (sc->sc_type >= WM_T_82544)
   4788 		CSR_WRITE(sc, WMREG_WUC, 0);
   4789 
   4790 	wm_reset_mdicnfg_82580(sc);
   4791 
   4792 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4793 		wm_pll_workaround_i210(sc);
   4794 
   4795 	if (sc->sc_type == WM_T_80003) {
   4796 		/* default to TRUE to enable the MDIC W/A */
   4797 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4798 
   4799 		rv = wm_kmrn_readreg(sc,
   4800 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4801 		if (rv == 0) {
   4802 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4803 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4804 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4805 			else
   4806 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4807 		}
   4808 	}
   4809 }
   4810 
   4811 /*
   4812  * wm_add_rxbuf:
   4813  *
   4814  *	Add a receive buffer to the indiciated descriptor.
   4815  */
   4816 static int
   4817 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4818 {
   4819 	struct wm_softc *sc = rxq->rxq_sc;
   4820 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4821 	struct mbuf *m;
   4822 	int error;
   4823 
   4824 	KASSERT(mutex_owned(rxq->rxq_lock));
   4825 
   4826 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4827 	if (m == NULL)
   4828 		return ENOBUFS;
   4829 
   4830 	MCLGET(m, M_DONTWAIT);
   4831 	if ((m->m_flags & M_EXT) == 0) {
   4832 		m_freem(m);
   4833 		return ENOBUFS;
   4834 	}
   4835 
   4836 	if (rxs->rxs_mbuf != NULL)
   4837 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4838 
   4839 	rxs->rxs_mbuf = m;
   4840 
   4841 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4842 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4843 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4844 	if (error) {
   4845 		/* XXX XXX XXX */
   4846 		aprint_error_dev(sc->sc_dev,
   4847 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4848 		panic("wm_add_rxbuf");
   4849 	}
   4850 
   4851 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4852 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4853 
   4854 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
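		/*
		 * Descriptive note: on NEWQUEUE devices the Rx descriptor
		 * is (re)written only while the receiver is enabled; see
		 * the RCTL.EN ordering note above wm_set_filter() in
		 * wm_init_locked().
		 */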
   4855 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4856 			wm_init_rxdesc(rxq, idx);
   4857 	} else
   4858 		wm_init_rxdesc(rxq, idx);
   4859 
   4860 	return 0;
   4861 }
   4862 
   4863 /*
   4864  * wm_rxdrain:
   4865  *
   4866  *	Drain the receive queue.
   4867  */
   4868 static void
   4869 wm_rxdrain(struct wm_rxqueue *rxq)
   4870 {
   4871 	struct wm_softc *sc = rxq->rxq_sc;
   4872 	struct wm_rxsoft *rxs;
   4873 	int i;
   4874 
   4875 	KASSERT(mutex_owned(rxq->rxq_lock));
   4876 
   4877 	for (i = 0; i < WM_NRXDESC; i++) {
   4878 		rxs = &rxq->rxq_soft[i];
   4879 		if (rxs->rxs_mbuf != NULL) {
   4880 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4881 			m_freem(rxs->rxs_mbuf);
   4882 			rxs->rxs_mbuf = NULL;
   4883 		}
   4884 	}
   4885 }
   4886 
   4887 /*
 * Set up registers for RSS.
 *
 * XXX VMDq is not yet supported.
   4891  */
   4892 static void
   4893 wm_init_rss(struct wm_softc *sc)
   4894 {
   4895 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4896 	int i;
   4897 
   4898 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4899 
   4900 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4901 		int qid, reta_ent;
   4902 
   4903 		qid  = i % sc->sc_nqueues;
		switch (sc->sc_type) {
   4905 		case WM_T_82574:
   4906 			reta_ent = __SHIFTIN(qid,
   4907 			    RETA_ENT_QINDEX_MASK_82574);
   4908 			break;
   4909 		case WM_T_82575:
   4910 			reta_ent = __SHIFTIN(qid,
   4911 			    RETA_ENT_QINDEX1_MASK_82575);
   4912 			break;
   4913 		default:
   4914 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4915 			break;
   4916 		}
   4917 
   4918 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4919 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4920 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4921 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4922 	}
   4923 
   4924 	rss_getkey((uint8_t *)rss_key);
   4925 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4926 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4927 
   4928 	if (sc->sc_type == WM_T_82574)
   4929 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4930 	else
   4931 		mrqc = MRQC_ENABLE_RSS_MQ;
   4932 
   4933 	/*
   4934 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4935 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4936 	 */
   4937 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4938 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4939 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4940 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4941 
   4942 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4943 }
   4944 
   4945 /*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the following parameters:
 *     - The number of hardware queues
   4950  *     - The number of MSI-X vectors (= "nvectors" argument)
   4951  *     - ncpu
   4952  */
   4953 static void
   4954 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4955 {
   4956 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4957 
   4958 	if (nvectors < 2) {
   4959 		sc->sc_nqueues = 1;
   4960 		return;
   4961 	}
   4962 
	switch (sc->sc_type) {
   4964 	case WM_T_82572:
   4965 		hw_ntxqueues = 2;
   4966 		hw_nrxqueues = 2;
   4967 		break;
   4968 	case WM_T_82574:
   4969 		hw_ntxqueues = 2;
   4970 		hw_nrxqueues = 2;
   4971 		break;
   4972 	case WM_T_82575:
   4973 		hw_ntxqueues = 4;
   4974 		hw_nrxqueues = 4;
   4975 		break;
   4976 	case WM_T_82576:
   4977 		hw_ntxqueues = 16;
   4978 		hw_nrxqueues = 16;
   4979 		break;
   4980 	case WM_T_82580:
   4981 	case WM_T_I350:
   4982 	case WM_T_I354:
   4983 		hw_ntxqueues = 8;
   4984 		hw_nrxqueues = 8;
   4985 		break;
   4986 	case WM_T_I210:
   4987 		hw_ntxqueues = 4;
   4988 		hw_nrxqueues = 4;
   4989 		break;
   4990 	case WM_T_I211:
   4991 		hw_ntxqueues = 2;
   4992 		hw_nrxqueues = 2;
   4993 		break;
   4994 		/*
		 * As the following ethernet controllers do not support
		 * MSI-X, this driver does not use multiqueue on them.
   4997 		 *     - WM_T_80003
   4998 		 *     - WM_T_ICH8
   4999 		 *     - WM_T_ICH9
   5000 		 *     - WM_T_ICH10
   5001 		 *     - WM_T_PCH
   5002 		 *     - WM_T_PCH2
   5003 		 *     - WM_T_PCH_LPT
   5004 		 */
   5005 	default:
   5006 		hw_ntxqueues = 1;
   5007 		hw_nrxqueues = 1;
   5008 		break;
   5009 	}
   5010 
   5011 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   5012 
   5013 	/*
	 * As more queues than MSI-X vectors cannot improve scaling, we
	 * limit the number of queues actually used.
   5016 	 */
   5017 	if (nvectors < hw_nqueues + 1)
   5018 		sc->sc_nqueues = nvectors - 1;
   5019 	else
   5020 		sc->sc_nqueues = hw_nqueues;
   5021 
   5022 	/*
	 * As more queues than CPUs cannot improve scaling, we limit
	 * the number of queues actually used.
   5025 	 */
   5026 	if (ncpu < sc->sc_nqueues)
   5027 		sc->sc_nqueues = ncpu;
   5028 }
   5029 
   5030 static inline bool
   5031 wm_is_using_msix(struct wm_softc *sc)
   5032 {
   5033 
   5034 	return (sc->sc_nintrs > 1);
   5035 }
   5036 
   5037 static inline bool
   5038 wm_is_using_multiqueue(struct wm_softc *sc)
   5039 {
   5040 
   5041 	return (sc->sc_nqueues > 1);
   5042 }
   5043 
   5044 static int
   5045 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5046 {
   5047 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5048 	wmq->wmq_id = qidx;
   5049 	wmq->wmq_intr_idx = intr_idx;
   5050 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5051 #ifdef WM_MPSAFE
   5052 	    | SOFTINT_MPSAFE
   5053 #endif
   5054 	    , wm_handle_queue, wmq);
   5055 	if (wmq->wmq_si != NULL)
   5056 		return 0;
   5057 
   5058 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5059 	    wmq->wmq_id);
   5060 
   5061 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5062 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5063 	return ENOMEM;
   5064 }
   5065 
   5066 /*
 * Both single-interrupt MSI and INTx can use this function.
   5068  */
   5069 static int
   5070 wm_setup_legacy(struct wm_softc *sc)
   5071 {
   5072 	pci_chipset_tag_t pc = sc->sc_pc;
   5073 	const char *intrstr = NULL;
   5074 	char intrbuf[PCI_INTRSTR_LEN];
   5075 	int error;
   5076 
   5077 	error = wm_alloc_txrx_queues(sc);
   5078 	if (error) {
   5079 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5080 		    error);
   5081 		return ENOMEM;
   5082 	}
   5083 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5084 	    sizeof(intrbuf));
   5085 #ifdef WM_MPSAFE
   5086 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5087 #endif
   5088 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5089 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5090 	if (sc->sc_ihs[0] == NULL) {
   5091 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5092 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5093 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5094 		return ENOMEM;
   5095 	}
   5096 
   5097 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5098 	sc->sc_nintrs = 1;
   5099 
   5100 	return wm_softint_establish(sc, 0, 0);
   5101 }
   5102 
   5103 static int
   5104 wm_setup_msix(struct wm_softc *sc)
   5105 {
   5106 	void *vih;
   5107 	kcpuset_t *affinity;
   5108 	int qidx, error, intr_idx, txrx_established;
   5109 	pci_chipset_tag_t pc = sc->sc_pc;
   5110 	const char *intrstr = NULL;
   5111 	char intrbuf[PCI_INTRSTR_LEN];
   5112 	char intr_xname[INTRDEVNAMEBUF];
   5113 
   5114 	if (sc->sc_nqueues < ncpu) {
   5115 		/*
		 * To avoid other devices' interrupts, the Tx/Rx interrupt
		 * affinity starts from CPU#1.
   5118 		 */
   5119 		sc->sc_affinity_offset = 1;
   5120 	} else {
   5121 		/*
		 * In this case, this device uses all CPUs, so we unify the
		 * affinity cpu_index with the MSI-X vector number for
		 * readability.
   5124 		 */
   5125 		sc->sc_affinity_offset = 0;
   5126 	}
   5127 
   5128 	error = wm_alloc_txrx_queues(sc);
   5129 	if (error) {
   5130 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5131 		    error);
   5132 		return ENOMEM;
   5133 	}
   5134 
   5135 	kcpuset_create(&affinity, false);
   5136 	intr_idx = 0;
   5137 
   5138 	/*
   5139 	 * TX and RX
   5140 	 */
   5141 	txrx_established = 0;
   5142 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5143 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5144 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5145 
   5146 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5147 		    sizeof(intrbuf));
   5148 #ifdef WM_MPSAFE
   5149 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5150 		    PCI_INTR_MPSAFE, true);
   5151 #endif
   5152 		memset(intr_xname, 0, sizeof(intr_xname));
   5153 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5154 		    device_xname(sc->sc_dev), qidx);
   5155 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5156 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5157 		if (vih == NULL) {
   5158 			aprint_error_dev(sc->sc_dev,
   5159 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5160 			    intrstr ? " at " : "",
   5161 			    intrstr ? intrstr : "");
   5162 
   5163 			goto fail;
   5164 		}
   5165 		kcpuset_zero(affinity);
   5166 		/* Round-robin affinity */
   5167 		kcpuset_set(affinity, affinity_to);
   5168 		error = interrupt_distribute(vih, affinity, NULL);
   5169 		if (error == 0) {
   5170 			aprint_normal_dev(sc->sc_dev,
   5171 			    "for TX and RX interrupting at %s affinity to %u\n",
   5172 			    intrstr, affinity_to);
   5173 		} else {
   5174 			aprint_normal_dev(sc->sc_dev,
   5175 			    "for TX and RX interrupting at %s\n", intrstr);
   5176 		}
   5177 		sc->sc_ihs[intr_idx] = vih;
   5178 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5179 			goto fail;
   5180 		txrx_established++;
   5181 		intr_idx++;
   5182 	}
   5183 
   5184 	/*
   5185 	 * LINK
   5186 	 */
   5187 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5188 	    sizeof(intrbuf));
   5189 #ifdef WM_MPSAFE
   5190 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5191 #endif
   5192 	memset(intr_xname, 0, sizeof(intr_xname));
   5193 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5194 	    device_xname(sc->sc_dev));
   5195 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5196 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5197 	if (vih == NULL) {
   5198 		aprint_error_dev(sc->sc_dev,
   5199 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5200 		    intrstr ? " at " : "",
   5201 		    intrstr ? intrstr : "");
   5202 
   5203 		goto fail;
   5204 	}
	/* Keep the default affinity for the LINK interrupt */
   5206 	aprint_normal_dev(sc->sc_dev,
   5207 	    "for LINK interrupting at %s\n", intrstr);
   5208 	sc->sc_ihs[intr_idx] = vih;
   5209 	sc->sc_link_intr_idx = intr_idx;
   5210 
   5211 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5212 	kcpuset_destroy(affinity);
   5213 	return 0;
   5214 
   5215  fail:
   5216 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5217 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5218 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5219 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5220 	}
   5221 
   5222 	kcpuset_destroy(affinity);
   5223 	return ENOMEM;
   5224 }
   5225 
   5226 static void
   5227 wm_unset_stopping_flags(struct wm_softc *sc)
   5228 {
   5229 	int i;
   5230 
   5231 	KASSERT(WM_CORE_LOCKED(sc));
   5232 
   5233 	/*
   5234 	 * must unset stopping flags in ascending order.
   5235 	 */
	for (i = 0; i < sc->sc_nqueues; i++) {
   5237 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5238 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5239 
   5240 		mutex_enter(txq->txq_lock);
   5241 		txq->txq_stopping = false;
   5242 		mutex_exit(txq->txq_lock);
   5243 
   5244 		mutex_enter(rxq->rxq_lock);
   5245 		rxq->rxq_stopping = false;
   5246 		mutex_exit(rxq->rxq_lock);
   5247 	}
   5248 
   5249 	sc->sc_core_stopping = false;
   5250 }
   5251 
   5252 static void
   5253 wm_set_stopping_flags(struct wm_softc *sc)
   5254 {
   5255 	int i;
   5256 
   5257 	KASSERT(WM_CORE_LOCKED(sc));
   5258 
   5259 	sc->sc_core_stopping = true;
   5260 
   5261 	/*
   5262 	 * must set stopping flags in ascending order.
   5263 	 */
	for (i = 0; i < sc->sc_nqueues; i++) {
   5265 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5266 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5267 
   5268 		mutex_enter(rxq->rxq_lock);
   5269 		rxq->rxq_stopping = true;
   5270 		mutex_exit(rxq->rxq_lock);
   5271 
   5272 		mutex_enter(txq->txq_lock);
   5273 		txq->txq_stopping = true;
   5274 		mutex_exit(txq->txq_lock);
   5275 	}
   5276 }
   5277 
   5278 /*
 * Write the interrupt interval value to the ITR or EITR register.
   5280  */
   5281 static void
   5282 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5283 {
   5284 
   5285 	if (!wmq->wmq_set_itr)
   5286 		return;
   5287 
   5288 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5289 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5290 
   5291 		/*
		 * The 82575 doesn't have the CNT_INGR field,
		 * so overwrite the counter field in software.
   5294 		 */
   5295 		if (sc->sc_type == WM_T_82575)
   5296 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5297 		else
   5298 			eitr |= EITR_CNT_INGR;
   5299 
   5300 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5301 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5302 		/*
		 * The 82574 has both ITR and EITR. Set EITR when we use
		 * the multiqueue function with MSI-X.
   5305 		 */
   5306 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5307 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5308 	} else {
   5309 		KASSERT(wmq->wmq_id == 0);
   5310 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5311 	}
   5312 
   5313 	wmq->wmq_set_itr = false;
   5314 }
   5315 
   5316 /*
   5317  * TODO
 * The dynamic ITR calculation below is almost the same as Linux igb's;
 * however, it does not fit wm(4), so AIM is disabled until we find an
 * appropriate ITR calculation.
   5321  */
   5322 /*
 * Calculate the interrupt interval value that wm_itrs_writereg() will
 * write to the register. This function itself does not write the
 * ITR/EITR register.
   5325  */
   5326 static void
   5327 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5328 {
   5329 #ifdef NOTYET
   5330 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5331 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5332 	uint32_t avg_size = 0;
   5333 	uint32_t new_itr;
   5334 
   5335 	if (rxq->rxq_packets)
   5336 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5337 	if (txq->txq_packets)
   5338 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5339 
   5340 	if (avg_size == 0) {
   5341 		new_itr = 450; /* restore default value */
   5342 		goto out;
   5343 	}
   5344 
   5345 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5346 	avg_size += 24;
   5347 
   5348 	/* Don't starve jumbo frames */
   5349 	avg_size = min(avg_size, 3000);
   5350 
   5351 	/* Give a little boost to mid-size frames */
   5352 	if ((avg_size > 300) && (avg_size < 1200))
   5353 		new_itr = avg_size / 3;
   5354 	else
   5355 		new_itr = avg_size / 2;
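	/*
	 * Worked example (illustrative): 1000-byte frames give
	 * avg_size = 1000 + 24 = 1024, which is in the mid-size range,
	 * so new_itr = 1024 / 3 = 341 before the scaling below.
	 */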
   5356 
   5357 out:
   5358 	/*
	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5360 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5361 	 */
   5362 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5363 		new_itr *= 4;
   5364 
   5365 	if (new_itr != wmq->wmq_itr) {
   5366 		wmq->wmq_itr = new_itr;
   5367 		wmq->wmq_set_itr = true;
   5368 	} else
   5369 		wmq->wmq_set_itr = false;
   5370 
   5371 	rxq->rxq_packets = 0;
   5372 	rxq->rxq_bytes = 0;
   5373 	txq->txq_packets = 0;
   5374 	txq->txq_bytes = 0;
   5375 #endif
   5376 }
   5377 
   5378 /*
   5379  * wm_init:		[ifnet interface function]
   5380  *
   5381  *	Initialize the interface.
   5382  */
   5383 static int
   5384 wm_init(struct ifnet *ifp)
   5385 {
   5386 	struct wm_softc *sc = ifp->if_softc;
   5387 	int ret;
   5388 
   5389 	WM_CORE_LOCK(sc);
   5390 	ret = wm_init_locked(ifp);
   5391 	WM_CORE_UNLOCK(sc);
   5392 
   5393 	return ret;
   5394 }
   5395 
   5396 static int
   5397 wm_init_locked(struct ifnet *ifp)
   5398 {
   5399 	struct wm_softc *sc = ifp->if_softc;
   5400 	int i, j, trynum, error = 0;
   5401 	uint32_t reg;
   5402 
   5403 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5404 		device_xname(sc->sc_dev), __func__));
   5405 	KASSERT(WM_CORE_LOCKED(sc));
   5406 
   5407 	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
   5411 	 * on such platforms.  One possibility is that the DMA itself is
   5412 	 * slightly more efficient if the front of the entire packet (instead
   5413 	 * of the front of the headers) is aligned.
   5414 	 *
   5415 	 * Note we must always set align_tweak to 0 if we are using
   5416 	 * jumbo frames.
   5417 	 */
   5418 #ifdef __NO_STRICT_ALIGNMENT
   5419 	sc->sc_align_tweak = 0;
   5420 #else
   5421 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5422 		sc->sc_align_tweak = 0;
   5423 	else
   5424 		sc->sc_align_tweak = 2;
   5425 #endif /* __NO_STRICT_ALIGNMENT */
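	/*
	 * Illustrative note: with the default 1500-byte MTU,
	 * 1500 + 14 + 4 = 1518 <= MCLBYTES - 2 (for 2KB clusters), so
	 * align_tweak is 2; receiving at a 2-byte offset puts the IP
	 * header on a 4-byte boundary after the 14-byte Ethernet header.
	 */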
   5426 
   5427 	/* Cancel any pending I/O. */
   5428 	wm_stop_locked(ifp, 0);
   5429 
   5430 	/* update statistics before reset */
   5431 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5432 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5433 
   5434 	/* PCH_SPT hardware workaround */
   5435 	if (sc->sc_type == WM_T_PCH_SPT)
   5436 		wm_flush_desc_rings(sc);
   5437 
   5438 	/* Reset the chip to a known state. */
   5439 	wm_reset(sc);
   5440 
   5441 	/*
	 * AMT-based hardware can now take control from firmware.
   5443 	 * Do this after reset.
   5444 	 */
   5445 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5446 		wm_get_hw_control(sc);
   5447 
   5448 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5449 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5450 		wm_legacy_irq_quirk_spt(sc);
   5451 
   5452 	/* Init hardware bits */
   5453 	wm_initialize_hardware_bits(sc);
   5454 
   5455 	/* Reset the PHY. */
   5456 	if (sc->sc_flags & WM_F_HAS_MII)
   5457 		wm_gmii_reset(sc);
   5458 
   5459 	/* Calculate (E)ITR value */
   5460 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5461 		/*
		 * For NEWQUEUE devices' EITR (except for the 82575).
		 * The 82575's EITR should be set to the same throttling
		 * value as the other old controllers' ITR because the
		 * interrupt/sec calculation is the same, that is,
		 * 1,000,000,000 / (N * 256).
		 *
		 * The 82574's EITR should be set to the same throttling
		 * value as its ITR.
		 *
		 * For N interrupts/sec, set this value to:
		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5471 		 */
   5472 		sc->sc_itr_init = 450;
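		/* That is, about 1,000,000 / 450 = 2222 interrupts/sec. */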
   5473 	} else if (sc->sc_type >= WM_T_82543) {
   5474 		/*
   5475 		 * Set up the interrupt throttling register (units of 256ns)
   5476 		 * Note that a footnote in Intel's documentation says this
   5477 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5478 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5479 		 * that that is also true for the 1024ns units of the other
   5480 		 * interrupt-related timer registers -- so, really, we ought
   5481 		 * to divide this value by 4 when the link speed is low.
   5482 		 *
   5483 		 * XXX implement this division at link speed change!
   5484 		 */
   5485 
   5486 		/*
   5487 		 * For N interrupts/sec, set this value to:
   5488 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5489 		 * absolute and packet timer values to this value
   5490 		 * divided by 4 to get "simple timer" behavior.
   5491 		 */
   5492 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
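		/* That is, N = 1,000,000,000 / (1500 * 256) = 2604. */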
   5493 	}
   5494 
   5495 	error = wm_init_txrx_queues(sc);
   5496 	if (error)
   5497 		goto out;
   5498 
   5499 	/*
   5500 	 * Clear out the VLAN table -- we don't use it (yet).
   5501 	 */
   5502 	CSR_WRITE(sc, WMREG_VET, 0);
   5503 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5504 		trynum = 10; /* Due to hw errata */
   5505 	else
   5506 		trynum = 1;
   5507 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5508 		for (j = 0; j < trynum; j++)
   5509 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5510 
   5511 	/*
   5512 	 * Set up flow-control parameters.
   5513 	 *
   5514 	 * XXX Values could probably stand some tuning.
   5515 	 */
   5516 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5517 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5518 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5519 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5520 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5521 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5522 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5523 	}
   5524 
   5525 	sc->sc_fcrtl = FCRTL_DFLT;
   5526 	if (sc->sc_type < WM_T_82543) {
   5527 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5528 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5529 	} else {
   5530 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5531 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5532 	}
   5533 
   5534 	if (sc->sc_type == WM_T_80003)
   5535 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5536 	else
   5537 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5538 
   5539 	/* Writes the control register. */
   5540 	wm_set_vlan(sc);
   5541 
   5542 	if (sc->sc_flags & WM_F_HAS_MII) {
   5543 		uint16_t kmreg;
   5544 
   5545 		switch (sc->sc_type) {
   5546 		case WM_T_80003:
   5547 		case WM_T_ICH8:
   5548 		case WM_T_ICH9:
   5549 		case WM_T_ICH10:
   5550 		case WM_T_PCH:
   5551 		case WM_T_PCH2:
   5552 		case WM_T_PCH_LPT:
   5553 		case WM_T_PCH_SPT:
   5554 		case WM_T_PCH_CNP:
   5555 			/*
   5556 			 * Set the mac to wait the maximum time between each
   5557 			 * iteration and increase the max iterations when
   5558 			 * polling the phy; this fixes erroneous timeouts at
   5559 			 * 10Mbps.
   5560 			 */
   5561 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5562 			    0xFFFF);
   5563 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5564 			    &kmreg);
   5565 			kmreg |= 0x3F;
   5566 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5567 			    kmreg);
   5568 			break;
   5569 		default:
   5570 			break;
   5571 		}
   5572 
   5573 		if (sc->sc_type == WM_T_80003) {
   5574 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5575 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5576 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5577 
   5578 			/* Bypass RX and TX FIFO's */
   5579 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5580 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5581 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5582 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5583 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5584 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5585 		}
   5586 	}
   5587 #if 0
   5588 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5589 #endif
   5590 
   5591 	/* Set up checksum offload parameters. */
   5592 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5593 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5594 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5595 		reg |= RXCSUM_IPOFL;
   5596 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5597 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5598 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5599 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5600 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5601 
   5602 	/* Set registers about MSI-X */
   5603 	if (wm_is_using_msix(sc)) {
   5604 		uint32_t ivar;
   5605 		struct wm_queue *wmq;
   5606 		int qid, qintr_idx;
   5607 
   5608 		if (sc->sc_type == WM_T_82575) {
   5609 			/* Interrupt control */
   5610 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5611 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5612 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5613 
   5614 			/* TX and RX */
   5615 			for (i = 0; i < sc->sc_nqueues; i++) {
   5616 				wmq = &sc->sc_queue[i];
   5617 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5618 				    EITR_TX_QUEUE(wmq->wmq_id)
   5619 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5620 			}
   5621 			/* Link status */
   5622 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5623 			    EITR_OTHER);
   5624 		} else if (sc->sc_type == WM_T_82574) {
   5625 			/* Interrupt control */
   5626 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5627 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5628 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5629 
   5630 			/*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has
			 * not been initialized yet, so re-initialize
			 * WMREG_RFCTL here.
   5635 			 */
   5636 			reg = CSR_READ(sc, WMREG_RFCTL);
   5637 			reg |= WMREG_RFCTL_ACKDIS;
   5638 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5639 
   5640 			ivar = 0;
   5641 			/* TX and RX */
   5642 			for (i = 0; i < sc->sc_nqueues; i++) {
   5643 				wmq = &sc->sc_queue[i];
   5644 				qid = wmq->wmq_id;
   5645 				qintr_idx = wmq->wmq_intr_idx;
   5646 
   5647 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5648 				    IVAR_TX_MASK_Q_82574(qid));
   5649 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5650 				    IVAR_RX_MASK_Q_82574(qid));
   5651 			}
   5652 			/* Link status */
   5653 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5654 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5655 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5656 		} else {
   5657 			/* Interrupt control */
   5658 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5659 			    | GPIE_EIAME | GPIE_PBA);
   5660 
   5661 			switch (sc->sc_type) {
   5662 			case WM_T_82580:
   5663 			case WM_T_I350:
   5664 			case WM_T_I354:
   5665 			case WM_T_I210:
   5666 			case WM_T_I211:
   5667 				/* TX and RX */
   5668 				for (i = 0; i < sc->sc_nqueues; i++) {
   5669 					wmq = &sc->sc_queue[i];
   5670 					qid = wmq->wmq_id;
   5671 					qintr_idx = wmq->wmq_intr_idx;
   5672 
   5673 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5674 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5675 					ivar |= __SHIFTIN((qintr_idx
   5676 						| IVAR_VALID),
   5677 					    IVAR_TX_MASK_Q(qid));
   5678 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5679 					ivar |= __SHIFTIN((qintr_idx
   5680 						| IVAR_VALID),
   5681 					    IVAR_RX_MASK_Q(qid));
   5682 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5683 				}
   5684 				break;
   5685 			case WM_T_82576:
   5686 				/* TX and RX */
   5687 				for (i = 0; i < sc->sc_nqueues; i++) {
   5688 					wmq = &sc->sc_queue[i];
   5689 					qid = wmq->wmq_id;
   5690 					qintr_idx = wmq->wmq_intr_idx;
   5691 
   5692 					ivar = CSR_READ(sc,
   5693 					    WMREG_IVAR_Q_82576(qid));
   5694 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5695 					ivar |= __SHIFTIN((qintr_idx
   5696 						| IVAR_VALID),
   5697 					    IVAR_TX_MASK_Q_82576(qid));
   5698 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5699 					ivar |= __SHIFTIN((qintr_idx
   5700 						| IVAR_VALID),
   5701 					    IVAR_RX_MASK_Q_82576(qid));
   5702 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5703 					    ivar);
   5704 				}
   5705 				break;
   5706 			default:
   5707 				break;
   5708 			}
   5709 
   5710 			/* Link status */
   5711 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5712 			    IVAR_MISC_OTHER);
   5713 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5714 		}
   5715 
   5716 		if (wm_is_using_multiqueue(sc)) {
   5717 			wm_init_rss(sc);
   5718 
   5719 			/*
   5720 			** NOTE: Receive Full-Packet Checksum Offload
   5721 			** is mutually exclusive with Multiqueue. However
   5722 			** this is not the same as TCP/IP checksums which
   5723 			** still work.
   5724 			*/
   5725 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5726 			reg |= RXCSUM_PCSD;
   5727 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5728 		}
   5729 	}
   5730 
   5731 	/* Set up the interrupt registers. */
   5732 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5733 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5734 	    ICR_RXO | ICR_RXT0;
   5735 	if (wm_is_using_msix(sc)) {
   5736 		uint32_t mask;
   5737 		struct wm_queue *wmq;
   5738 
   5739 		switch (sc->sc_type) {
   5740 		case WM_T_82574:
   5741 			mask = 0;
   5742 			for (i = 0; i < sc->sc_nqueues; i++) {
   5743 				wmq = &sc->sc_queue[i];
   5744 				mask |= ICR_TXQ(wmq->wmq_id);
   5745 				mask |= ICR_RXQ(wmq->wmq_id);
   5746 			}
   5747 			mask |= ICR_OTHER;
   5748 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5749 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5750 			break;
   5751 		default:
   5752 			if (sc->sc_type == WM_T_82575) {
   5753 				mask = 0;
   5754 				for (i = 0; i < sc->sc_nqueues; i++) {
   5755 					wmq = &sc->sc_queue[i];
   5756 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5757 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5758 				}
   5759 				mask |= EITR_OTHER;
   5760 			} else {
   5761 				mask = 0;
   5762 				for (i = 0; i < sc->sc_nqueues; i++) {
   5763 					wmq = &sc->sc_queue[i];
   5764 					mask |= 1 << wmq->wmq_intr_idx;
   5765 				}
   5766 				mask |= 1 << sc->sc_link_intr_idx;
   5767 			}
   5768 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5769 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5770 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5771 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5772 			break;
   5773 		}
   5774 	} else
   5775 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5776 
   5777 	/* Set up the inter-packet gap. */
   5778 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5779 
   5780 	if (sc->sc_type >= WM_T_82543) {
   5781 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5782 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5783 			wm_itrs_writereg(sc, wmq);
   5784 		}
   5785 		/*
		 * Link interrupts occur much less frequently than TX and
		 * RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
		 * if_igb does.
   5790 		 */
   5791 	}
   5792 
	/* Set the VLAN ethertype. */
   5794 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5795 
   5796 	/*
   5797 	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
   5799 	 * we resolve the media type.
   5800 	 */
   5801 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5802 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5803 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5804 	if (sc->sc_type >= WM_T_82571)
   5805 		sc->sc_tctl |= TCTL_MULR;
   5806 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5807 
   5808 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set. See the documentation. */
   5810 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5811 	}
   5812 
   5813 	if (sc->sc_type == WM_T_80003) {
   5814 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5815 		reg &= ~TCTL_EXT_GCEX_MASK;
   5816 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5817 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5818 	}
   5819 
   5820 	/* Set the media. */
   5821 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5822 		goto out;
   5823 
   5824 	/* Configure for OS presence */
   5825 	wm_init_manageability(sc);
   5826 
   5827 	/*
   5828 	 * Set up the receive control register; we actually program
   5829 	 * the register when we set the receive filter.  Use multicast
   5830 	 * address offset type 0.
   5831 	 *
   5832 	 * Only the i82544 has the ability to strip the incoming
   5833 	 * CRC, so we don't enable that feature.
   5834 	 */
   5835 	sc->sc_mchash_type = 0;
   5836 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5837 	    | RCTL_MO(sc->sc_mchash_type);
   5838 
   5839 	/*
	 * The 82574 uses the one-buffer extended Rx descriptor.
   5841 	 */
   5842 	if (sc->sc_type == WM_T_82574)
   5843 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5844 
   5845 	/*
   5846 	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not. So ask for stripped CRC here and cope with it
	 * in rxeof.
   5848 	 */
   5849 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5850 	    || (sc->sc_type == WM_T_I210))
   5851 		sc->sc_rctl |= RCTL_SECRC;
   5852 
   5853 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5854 	    && (ifp->if_mtu > ETHERMTU)) {
   5855 		sc->sc_rctl |= RCTL_LPE;
   5856 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5857 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5858 	}
   5859 
   5860 	if (MCLBYTES == 2048) {
   5861 		sc->sc_rctl |= RCTL_2k;
   5862 	} else {
   5863 		if (sc->sc_type >= WM_T_82543) {
   5864 			switch (MCLBYTES) {
   5865 			case 4096:
   5866 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5867 				break;
   5868 			case 8192:
   5869 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5870 				break;
   5871 			case 16384:
   5872 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5873 				break;
   5874 			default:
   5875 				panic("wm_init: MCLBYTES %d unsupported",
   5876 				    MCLBYTES);
   5877 				break;
   5878 			}
   5879 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5880 	}
   5881 
   5882 	/* Enable ECC */
   5883 	switch (sc->sc_type) {
   5884 	case WM_T_82571:
   5885 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5886 		reg |= PBA_ECC_CORR_EN;
   5887 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5888 		break;
   5889 	case WM_T_PCH_LPT:
   5890 	case WM_T_PCH_SPT:
   5891 	case WM_T_PCH_CNP:
   5892 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5893 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5894 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5895 
   5896 		sc->sc_ctrl |= CTRL_MEHE;
   5897 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5898 		break;
   5899 	default:
   5900 		break;
   5901 	}
   5902 
   5903 	/*
   5904 	 * Set the receive filter.
   5905 	 *
   5906 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5907 	 * the setting of RCTL.EN in wm_set_filter()
   5908 	 */
   5909 	wm_set_filter(sc);
   5910 
	/* On 82575 and later, set RDT only if RX is enabled */
   5912 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5913 		int qidx;
   5914 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5915 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5916 			for (i = 0; i < WM_NRXDESC; i++) {
   5917 				mutex_enter(rxq->rxq_lock);
   5918 				wm_init_rxdesc(rxq, i);
				mutex_exit(rxq->rxq_lock);
			}
   5922 		}
   5923 	}
   5924 
   5925 	wm_unset_stopping_flags(sc);
   5926 
   5927 	/* Start the one second link check clock. */
   5928 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5929 
   5930 	/* ...all done! */
   5931 	ifp->if_flags |= IFF_RUNNING;
   5932 	ifp->if_flags &= ~IFF_OACTIVE;
   5933 
   5934  out:
   5935 	sc->sc_if_flags = ifp->if_flags;
   5936 	if (error)
   5937 		log(LOG_ERR, "%s: interface not running\n",
   5938 		    device_xname(sc->sc_dev));
   5939 	return error;
   5940 }
   5941 
   5942 /*
   5943  * wm_stop:		[ifnet interface function]
   5944  *
   5945  *	Stop transmission on the interface.
   5946  */
   5947 static void
   5948 wm_stop(struct ifnet *ifp, int disable)
   5949 {
   5950 	struct wm_softc *sc = ifp->if_softc;
   5951 
   5952 	WM_CORE_LOCK(sc);
   5953 	wm_stop_locked(ifp, disable);
   5954 	WM_CORE_UNLOCK(sc);
   5955 }
   5956 
   5957 static void
   5958 wm_stop_locked(struct ifnet *ifp, int disable)
   5959 {
   5960 	struct wm_softc *sc = ifp->if_softc;
   5961 	struct wm_txsoft *txs;
   5962 	int i, qidx;
   5963 
   5964 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5965 		device_xname(sc->sc_dev), __func__));
   5966 	KASSERT(WM_CORE_LOCKED(sc));
   5967 
   5968 	wm_set_stopping_flags(sc);
   5969 
   5970 	/* Stop the one second clock. */
   5971 	callout_stop(&sc->sc_tick_ch);
   5972 
   5973 	/* Stop the 82547 Tx FIFO stall check timer. */
   5974 	if (sc->sc_type == WM_T_82547)
   5975 		callout_stop(&sc->sc_txfifo_ch);
   5976 
   5977 	if (sc->sc_flags & WM_F_HAS_MII) {
   5978 		/* Down the MII. */
   5979 		mii_down(&sc->sc_mii);
   5980 	} else {
   5981 #if 0
   5982 		/* Should we clear PHY's status properly? */
   5983 		wm_reset(sc);
   5984 #endif
   5985 	}
   5986 
   5987 	/* Stop the transmit and receive processes. */
   5988 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5989 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5990 	sc->sc_rctl &= ~RCTL_EN;
   5991 
   5992 	/*
   5993 	 * Clear the interrupt mask to ensure the device cannot assert its
   5994 	 * interrupt line.
   5995 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5996 	 * service any currently pending or shared interrupt.
   5997 	 */
   5998 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5999 	sc->sc_icr = 0;
   6000 	if (wm_is_using_msix(sc)) {
   6001 		if (sc->sc_type != WM_T_82574) {
   6002 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6003 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6004 		} else
   6005 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6006 	}
   6007 
   6008 	/* Release any queued transmit buffers. */
   6009 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6010 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6011 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6012 		mutex_enter(txq->txq_lock);
   6013 		txq->txq_watchdog = false; /* ensure watchdog disabled */
   6014 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6015 			txs = &txq->txq_soft[i];
   6016 			if (txs->txs_mbuf != NULL) {
   6017 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6018 				m_freem(txs->txs_mbuf);
   6019 				txs->txs_mbuf = NULL;
   6020 			}
   6021 		}
   6022 		mutex_exit(txq->txq_lock);
   6023 	}
   6024 
   6025 	/* Mark the interface as down and cancel the watchdog timer. */
   6026 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6027 
   6028 	if (disable) {
   6029 		for (i = 0; i < sc->sc_nqueues; i++) {
   6030 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6031 			mutex_enter(rxq->rxq_lock);
   6032 			wm_rxdrain(rxq);
   6033 			mutex_exit(rxq->rxq_lock);
   6034 		}
   6035 	}
   6036 
   6037 #if 0 /* notyet */
   6038 	if (sc->sc_type >= WM_T_82544)
   6039 		CSR_WRITE(sc, WMREG_WUC, 0);
   6040 #endif
   6041 }
   6042 
   6043 static void
   6044 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6045 {
   6046 	struct mbuf *m;
   6047 	int i;
   6048 
   6049 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6050 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6051 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6052 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6053 		    m->m_data, m->m_len, m->m_flags);
   6054 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6055 	    i, i == 1 ? "" : "s");
   6056 }
   6057 
   6058 /*
   6059  * wm_82547_txfifo_stall:
   6060  *
   6061  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6062  *	reset the FIFO pointers, and restart packet transmission.
   6063  */
   6064 static void
   6065 wm_82547_txfifo_stall(void *arg)
   6066 {
   6067 	struct wm_softc *sc = arg;
   6068 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6069 
   6070 	mutex_enter(txq->txq_lock);
   6071 
   6072 	if (txq->txq_stopping)
   6073 		goto out;
   6074 
   6075 	if (txq->txq_fifo_stall) {
   6076 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6077 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6078 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6079 			/*
   6080 			 * Packets have drained.  Stop transmitter, reset
   6081 			 * FIFO pointers, restart transmitter, and kick
   6082 			 * the packet queue.
   6083 			 */
   6084 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6085 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6086 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6087 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6088 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6089 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6090 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6091 			CSR_WRITE_FLUSH(sc);
   6092 
   6093 			txq->txq_fifo_head = 0;
   6094 			txq->txq_fifo_stall = 0;
   6095 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6096 		} else {
   6097 			/*
   6098 			 * Still waiting for packets to drain; try again in
   6099 			 * another tick.
   6100 			 */
   6101 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6102 		}
   6103 	}
   6104 
   6105 out:
   6106 	mutex_exit(txq->txq_lock);
   6107 }
   6108 
   6109 /*
   6110  * wm_82547_txfifo_bugchk:
   6111  *
   6112  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6113  *	prevent enqueueing a packet that would wrap around the end
   6114  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   6115  *
   6116  *	We do this by checking the amount of space before the end
   6117  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6118  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6119  *	the internal FIFO pointers to the beginning, and restart
   6120  *	transmission on the interface.
   6121  */
   6122 #define	WM_FIFO_HDR		0x10
   6123 #define	WM_82547_PAD_LEN	0x3e0
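/*
 * Illustrative example: a 1514-byte frame consumes
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space,
 * since WM_FIFO_HDR is 0x10 (16) bytes.
 */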
   6124 static int
   6125 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6126 {
   6127 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6128 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6129 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6130 
   6131 	/* Just return if already stalled. */
   6132 	if (txq->txq_fifo_stall)
   6133 		return 1;
   6134 
   6135 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6136 		/* Stall only occurs in half-duplex mode. */
   6137 		goto send_packet;
   6138 	}
   6139 
   6140 	if (len >= WM_82547_PAD_LEN + space) {
   6141 		txq->txq_fifo_stall = 1;
   6142 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6143 		return 1;
   6144 	}
   6145 
   6146  send_packet:
   6147 	txq->txq_fifo_head += len;
   6148 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6149 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6150 
   6151 	return 0;
   6152 }
   6153 
   6154 static int
   6155 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6156 {
   6157 	int error;
   6158 
   6159 	/*
   6160 	 * Allocate the control data structures, and create and load the
   6161 	 * DMA map for it.
   6162 	 *
   6163 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6164 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6165 	 * both sets within the same 4G segment.
   6166 	 */
   6167 	if (sc->sc_type < WM_T_82544)
   6168 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6169 	else
   6170 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6171 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6172 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6173 	else
   6174 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6175 
   6176 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6177 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6178 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6179 		aprint_error_dev(sc->sc_dev,
   6180 		    "unable to allocate TX control data, error = %d\n",
   6181 		    error);
   6182 		goto fail_0;
   6183 	}
   6184 
   6185 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6186 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6187 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6188 		aprint_error_dev(sc->sc_dev,
   6189 		    "unable to map TX control data, error = %d\n", error);
   6190 		goto fail_1;
   6191 	}
   6192 
   6193 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6194 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6195 		aprint_error_dev(sc->sc_dev,
   6196 		    "unable to create TX control data DMA map, error = %d\n",
   6197 		    error);
   6198 		goto fail_2;
   6199 	}
   6200 
   6201 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6202 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6203 		aprint_error_dev(sc->sc_dev,
   6204 		    "unable to load TX control data DMA map, error = %d\n",
   6205 		    error);
   6206 		goto fail_3;
   6207 	}
   6208 
   6209 	return 0;
   6210 
   6211  fail_3:
   6212 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6213  fail_2:
   6214 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6215 	    WM_TXDESCS_SIZE(txq));
   6216  fail_1:
   6217 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6218  fail_0:
   6219 	return error;
   6220 }
   6221 
   6222 static void
   6223 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6224 {
   6225 
   6226 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6227 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6228 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6229 	    WM_TXDESCS_SIZE(txq));
   6230 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6231 }
   6232 
   6233 static int
   6234 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6235 {
   6236 	int error;
   6237 	size_t rxq_descs_size;
   6238 
   6239 	/*
   6240 	 * Allocate the control data structures, and create and load the
   6241 	 * DMA map for it.
   6242 	 *
   6243 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6244 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6245 	 * both sets within the same 4G segment.
   6246 	 */
   6247 	rxq->rxq_ndesc = WM_NRXDESC;
   6248 	if (sc->sc_type == WM_T_82574)
   6249 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6250 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6251 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6252 	else
   6253 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6254 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6255 
   6256 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6257 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6258 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6259 		aprint_error_dev(sc->sc_dev,
   6260 		    "unable to allocate RX control data, error = %d\n",
   6261 		    error);
   6262 		goto fail_0;
   6263 	}
   6264 
   6265 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6266 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6267 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6268 		aprint_error_dev(sc->sc_dev,
   6269 		    "unable to map RX control data, error = %d\n", error);
   6270 		goto fail_1;
   6271 	}
   6272 
   6273 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6274 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6275 		aprint_error_dev(sc->sc_dev,
   6276 		    "unable to create RX control data DMA map, error = %d\n",
   6277 		    error);
   6278 		goto fail_2;
   6279 	}
   6280 
   6281 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6282 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6283 		aprint_error_dev(sc->sc_dev,
   6284 		    "unable to load RX control data DMA map, error = %d\n",
   6285 		    error);
   6286 		goto fail_3;
   6287 	}
   6288 
   6289 	return 0;
   6290 
   6291  fail_3:
   6292 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6293  fail_2:
   6294 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6295 	    rxq_descs_size);
   6296  fail_1:
   6297 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6298  fail_0:
   6299 	return error;
   6300 }
   6301 
   6302 static void
   6303 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6304 {
   6305 
   6306 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6307 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6308 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6309 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6310 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6311 }
   6312 
   6313 
   6314 static int
   6315 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6316 {
   6317 	int i, error;
   6318 
   6319 	/* Create the transmit buffer DMA maps. */
   6320 	WM_TXQUEUELEN(txq) =
   6321 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6322 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6323 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6324 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6325 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6326 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6327 			aprint_error_dev(sc->sc_dev,
   6328 			    "unable to create Tx DMA map %d, error = %d\n",
   6329 			    i, error);
   6330 			goto fail;
   6331 		}
   6332 	}
   6333 
   6334 	return 0;
   6335 
   6336  fail:
   6337 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6338 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6339 			bus_dmamap_destroy(sc->sc_dmat,
   6340 			    txq->txq_soft[i].txs_dmamap);
   6341 	}
   6342 	return error;
   6343 }
   6344 
   6345 static void
   6346 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6347 {
   6348 	int i;
   6349 
   6350 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6351 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6352 			bus_dmamap_destroy(sc->sc_dmat,
   6353 			    txq->txq_soft[i].txs_dmamap);
   6354 	}
   6355 }
   6356 
   6357 static int
   6358 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6359 {
   6360 	int i, error;
   6361 
   6362 	/* Create the receive buffer DMA maps. */
   6363 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6364 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6365 			    MCLBYTES, 0, 0,
   6366 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6367 			aprint_error_dev(sc->sc_dev,
   6368 			    "unable to create Rx DMA map %d error = %d\n",
   6369 			    i, error);
   6370 			goto fail;
   6371 		}
   6372 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6373 	}
   6374 
   6375 	return 0;
   6376 
   6377  fail:
   6378 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6379 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6380 			bus_dmamap_destroy(sc->sc_dmat,
   6381 			    rxq->rxq_soft[i].rxs_dmamap);
   6382 	}
   6383 	return error;
   6384 }
   6385 
   6386 static void
   6387 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6388 {
   6389 	int i;
   6390 
   6391 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6392 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6393 			bus_dmamap_destroy(sc->sc_dmat,
   6394 			    rxq->rxq_soft[i].rxs_dmamap);
   6395 	}
   6396 }
   6397 
   6398 /*
    6399  * wm_alloc_txrx_queues:
    6400  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6401  */
   6402 static int
   6403 wm_alloc_txrx_queues(struct wm_softc *sc)
   6404 {
   6405 	int i, error, tx_done, rx_done;
   6406 
   6407 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6408 	    KM_SLEEP);
   6409 	if (sc->sc_queue == NULL) {
    6410 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6411 		error = ENOMEM;
   6412 		goto fail_0;
   6413 	}
   6414 
   6415 	/*
   6416 	 * For transmission
   6417 	 */
   6418 	error = 0;
   6419 	tx_done = 0;
   6420 	for (i = 0; i < sc->sc_nqueues; i++) {
   6421 #ifdef WM_EVENT_COUNTERS
   6422 		int j;
   6423 		const char *xname;
   6424 #endif
   6425 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6426 		txq->txq_sc = sc;
   6427 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6428 
   6429 		error = wm_alloc_tx_descs(sc, txq);
   6430 		if (error)
   6431 			break;
   6432 		error = wm_alloc_tx_buffer(sc, txq);
   6433 		if (error) {
   6434 			wm_free_tx_descs(sc, txq);
   6435 			break;
   6436 		}
   6437 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6438 		if (txq->txq_interq == NULL) {
   6439 			wm_free_tx_descs(sc, txq);
   6440 			wm_free_tx_buffer(sc, txq);
   6441 			error = ENOMEM;
   6442 			break;
   6443 		}
   6444 
   6445 #ifdef WM_EVENT_COUNTERS
   6446 		xname = device_xname(sc->sc_dev);
   6447 
   6448 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6449 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6450 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6451 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6452 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6453 
   6454 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6455 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6456 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6457 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6458 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6459 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6460 
   6461 		for (j = 0; j < WM_NTXSEGS; j++) {
   6462 			snprintf(txq->txq_txseg_evcnt_names[j],
   6463 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6464 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6465 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6466 		}
   6467 
   6468 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6469 
   6470 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6471 #endif /* WM_EVENT_COUNTERS */
   6472 
   6473 		tx_done++;
   6474 	}
   6475 	if (error)
   6476 		goto fail_1;
   6477 
   6478 	/*
    6479 	 * For receive
   6480 	 */
   6481 	error = 0;
   6482 	rx_done = 0;
   6483 	for (i = 0; i < sc->sc_nqueues; i++) {
   6484 #ifdef WM_EVENT_COUNTERS
   6485 		const char *xname;
   6486 #endif
   6487 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6488 		rxq->rxq_sc = sc;
   6489 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6490 
   6491 		error = wm_alloc_rx_descs(sc, rxq);
   6492 		if (error)
   6493 			break;
   6494 
   6495 		error = wm_alloc_rx_buffer(sc, rxq);
   6496 		if (error) {
   6497 			wm_free_rx_descs(sc, rxq);
   6498 			break;
   6499 		}
   6500 
   6501 #ifdef WM_EVENT_COUNTERS
   6502 		xname = device_xname(sc->sc_dev);
   6503 
   6504 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6505 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxdefer, rxq, i, xname);
   6506 
   6507 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6508 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6509 #endif /* WM_EVENT_COUNTERS */
   6510 
   6511 		rx_done++;
   6512 	}
   6513 	if (error)
   6514 		goto fail_2;
   6515 
   6516 	return 0;
   6517 
   6518  fail_2:
   6519 	for (i = 0; i < rx_done; i++) {
   6520 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6521 		wm_free_rx_buffer(sc, rxq);
   6522 		wm_free_rx_descs(sc, rxq);
   6523 		if (rxq->rxq_lock)
   6524 			mutex_obj_free(rxq->rxq_lock);
   6525 	}
   6526  fail_1:
   6527 	for (i = 0; i < tx_done; i++) {
   6528 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6529 		pcq_destroy(txq->txq_interq);
   6530 		wm_free_tx_buffer(sc, txq);
   6531 		wm_free_tx_descs(sc, txq);
   6532 		if (txq->txq_lock)
   6533 			mutex_obj_free(txq->txq_lock);
   6534 	}
   6535 
   6536 	kmem_free(sc->sc_queue,
   6537 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6538  fail_0:
   6539 	return error;
   6540 }
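
/*
 * Illustrative sketch (not compiled into the driver): the tx_done and
 * rx_done counters above implement partial rollback -- on failure, only
 * the queues that were fully set up are torn down.  The resource type
 * below is hypothetical.
 */
#if 0
#include <stdlib.h>

static int
setup_all(void **res, int n)
{
	int i, done = 0;

	for (i = 0; i < n; i++) {
		if ((res[i] = malloc(64)) == NULL)
			break;
		done++;
	}
	if (done == n)
		return 0;

	/* Unwind only the entries that were fully set up. */
	for (i = 0; i < done; i++)
		free(res[i]);
	return -1;
}
#endif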
   6541 
   6542 /*
    6543  * wm_free_txrx_queues:
    6544  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6545  */
   6546 static void
   6547 wm_free_txrx_queues(struct wm_softc *sc)
   6548 {
   6549 	int i;
   6550 
   6551 	for (i = 0; i < sc->sc_nqueues; i++) {
   6552 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6553 
   6554 #ifdef WM_EVENT_COUNTERS
   6555 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6556 		WM_Q_EVCNT_DETACH(rxq, rxdefer, rxq, i);
   6557 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6558 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6559 #endif /* WM_EVENT_COUNTERS */
   6560 
   6561 		wm_free_rx_buffer(sc, rxq);
   6562 		wm_free_rx_descs(sc, rxq);
   6563 		if (rxq->rxq_lock)
   6564 			mutex_obj_free(rxq->rxq_lock);
   6565 	}
   6566 
   6567 	for (i = 0; i < sc->sc_nqueues; i++) {
   6568 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6569 		struct mbuf *m;
   6570 #ifdef WM_EVENT_COUNTERS
   6571 		int j;
   6572 
   6573 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6574 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6575 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6576 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6577 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6578 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6579 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6580 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6581 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6582 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6583 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6584 
   6585 		for (j = 0; j < WM_NTXSEGS; j++)
   6586 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6587 
   6588 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6589 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6590 #endif /* WM_EVENT_COUNTERS */
   6591 
   6592 		/* drain txq_interq */
   6593 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6594 			m_freem(m);
   6595 		pcq_destroy(txq->txq_interq);
   6596 
   6597 		wm_free_tx_buffer(sc, txq);
   6598 		wm_free_tx_descs(sc, txq);
   6599 		if (txq->txq_lock)
   6600 			mutex_obj_free(txq->txq_lock);
   6601 	}
   6602 
   6603 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6604 }
   6605 
   6606 static void
   6607 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6608 {
   6609 
   6610 	KASSERT(mutex_owned(txq->txq_lock));
   6611 
   6612 	/* Initialize the transmit descriptor ring. */
   6613 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6614 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6615 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6616 	txq->txq_free = WM_NTXDESC(txq);
   6617 	txq->txq_next = 0;
   6618 }
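
/*
 * Sketch of what a descriptor-sync helper like wm_cdtxsync() above
 * plausibly does (hypothetical, not the actual implementation): convert
 * a descriptor range into byte offsets for bus_dmamap_sync(), splitting
 * the range when it wraps past the end of the ring.
 */
#if 0
static void
example_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
{
	struct wm_softc *sc = txq->txq_sc;

	/* If the range wraps, sync the tail of the ring first. */
	if (start + num > WM_NTXDESC(txq)) {
		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
		    txq->txq_descsize * start,
		    txq->txq_descsize * (WM_NTXDESC(txq) - start), ops);
		num -= WM_NTXDESC(txq) - start;
		start = 0;
	}
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
	    txq->txq_descsize * start, txq->txq_descsize * num, ops);
}
#endif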
   6619 
   6620 static void
   6621 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6622     struct wm_txqueue *txq)
   6623 {
   6624 
   6625 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6626 		device_xname(sc->sc_dev), __func__));
   6627 	KASSERT(mutex_owned(txq->txq_lock));
   6628 
   6629 	if (sc->sc_type < WM_T_82543) {
   6630 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6631 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6632 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6633 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6634 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6635 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6636 	} else {
   6637 		int qid = wmq->wmq_id;
   6638 
   6639 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6640 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6641 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6642 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6643 
   6644 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
    6645 			/*
    6646 			 * Don't write TDT before TCTL.EN is set.
    6647 			 * See the documentation.
    6648 			 */
   6649 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6650 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6651 			    | TXDCTL_WTHRESH(0));
   6652 		else {
   6653 			/* XXX should update with AIM? */
   6654 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6655 			if (sc->sc_type >= WM_T_82540) {
   6656 				/* should be same */
   6657 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6658 			}
   6659 
   6660 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6661 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6662 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6663 		}
   6664 	}
   6665 }
   6666 
   6667 static void
   6668 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6669 {
   6670 	int i;
   6671 
   6672 	KASSERT(mutex_owned(txq->txq_lock));
   6673 
   6674 	/* Initialize the transmit job descriptors. */
   6675 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6676 		txq->txq_soft[i].txs_mbuf = NULL;
   6677 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6678 	txq->txq_snext = 0;
   6679 	txq->txq_sdirty = 0;
   6680 }
   6681 
   6682 static void
   6683 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6684     struct wm_txqueue *txq)
   6685 {
   6686 
   6687 	KASSERT(mutex_owned(txq->txq_lock));
   6688 
   6689 	/*
   6690 	 * Set up some register offsets that are different between
   6691 	 * the i82542 and the i82543 and later chips.
   6692 	 */
   6693 	if (sc->sc_type < WM_T_82543)
   6694 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6695 	else
   6696 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6697 
   6698 	wm_init_tx_descs(sc, txq);
   6699 	wm_init_tx_regs(sc, wmq, txq);
   6700 	wm_init_tx_buffer(sc, txq);
   6701 
   6702 	txq->txq_watchdog = false;
   6703 }
   6704 
   6705 static void
   6706 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6707     struct wm_rxqueue *rxq)
   6708 {
   6709 
   6710 	KASSERT(mutex_owned(rxq->rxq_lock));
   6711 
   6712 	/*
   6713 	 * Initialize the receive descriptor and receive job
   6714 	 * descriptor rings.
   6715 	 */
   6716 	if (sc->sc_type < WM_T_82543) {
   6717 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6718 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6719 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6720 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6721 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6722 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6723 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6724 
   6725 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6726 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6727 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6728 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6729 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6730 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6731 	} else {
   6732 		int qid = wmq->wmq_id;
   6733 
   6734 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6735 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6736 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6737 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6738 
   6739 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6740 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6741 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6742 
    6743 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6744 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6745 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6746 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6747 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6748 			    | RXDCTL_WTHRESH(1));
   6749 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6750 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6751 		} else {
   6752 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6753 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6754 			/* XXX should update with AIM? */
   6755 			CSR_WRITE(sc, WMREG_RDTR,
   6756 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   6757 			/* MUST be same */
   6758 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6759 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6760 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6761 		}
   6762 	}
   6763 }
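
/*
 * Illustrative sketch (not compiled into the driver): SRRCTL takes the
 * Rx buffer size in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, which
 * is why the code above panics unless MCLBYTES is a multiple of that
 * unit.  The helper below is hypothetical.
 */
#if 0
#include <assert.h>

static unsigned
pack_bsizepkt(unsigned nbytes, unsigned shift)
{

	/* The size must be an exact multiple of the register unit. */
	assert((nbytes & ((1U << shift) - 1)) == 0);
	return nbytes >> shift;
}
#endif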
   6764 
   6765 static int
   6766 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6767 {
   6768 	struct wm_rxsoft *rxs;
   6769 	int error, i;
   6770 
   6771 	KASSERT(mutex_owned(rxq->rxq_lock));
   6772 
   6773 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6774 		rxs = &rxq->rxq_soft[i];
   6775 		if (rxs->rxs_mbuf == NULL) {
   6776 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6777 				log(LOG_ERR, "%s: unable to allocate or map "
   6778 				    "rx buffer %d, error = %d\n",
   6779 				    device_xname(sc->sc_dev), i, error);
   6780 				/*
   6781 				 * XXX Should attempt to run with fewer receive
   6782 				 * XXX buffers instead of just failing.
   6783 				 */
   6784 				wm_rxdrain(rxq);
   6785 				return ENOMEM;
   6786 			}
   6787 		} else {
   6788 			/*
   6789 			 * For 82575 and 82576, the RX descriptors must be
   6790 			 * initialized after the setting of RCTL.EN in
   6791 			 * wm_set_filter()
   6792 			 */
   6793 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6794 				wm_init_rxdesc(rxq, i);
   6795 		}
   6796 	}
   6797 	rxq->rxq_ptr = 0;
   6798 	rxq->rxq_discard = 0;
   6799 	WM_RXCHAIN_RESET(rxq);
   6800 
   6801 	return 0;
   6802 }
   6803 
   6804 static int
   6805 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6806     struct wm_rxqueue *rxq)
   6807 {
   6808 
   6809 	KASSERT(mutex_owned(rxq->rxq_lock));
   6810 
   6811 	/*
   6812 	 * Set up some register offsets that are different between
   6813 	 * the i82542 and the i82543 and later chips.
   6814 	 */
   6815 	if (sc->sc_type < WM_T_82543)
   6816 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6817 	else
   6818 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6819 
   6820 	wm_init_rx_regs(sc, wmq, rxq);
   6821 	return wm_init_rx_buffer(sc, rxq);
   6822 }
   6823 
   6824 /*
    6825  * wm_init_txrx_queues:
    6826  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6827  */
   6828 static int
   6829 wm_init_txrx_queues(struct wm_softc *sc)
   6830 {
   6831 	int i, error = 0;
   6832 
   6833 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6834 		device_xname(sc->sc_dev), __func__));
   6835 
   6836 	for (i = 0; i < sc->sc_nqueues; i++) {
   6837 		struct wm_queue *wmq = &sc->sc_queue[i];
   6838 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6839 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6840 
    6841 		/*
    6842 		 * TODO
    6843 		 * Currently, a constant value is used instead of AIM.
    6844 		 * Furthermore, multiqueue setups, which use polling mode,
    6845 		 * get a shorter interrupt interval than the default.
    6846 		 * More tuning and AIM are required.
    6847 		 */
   6848 		if (wm_is_using_multiqueue(sc))
   6849 			wmq->wmq_itr = 50;
   6850 		else
   6851 			wmq->wmq_itr = sc->sc_itr_init;
   6852 		wmq->wmq_set_itr = true;
   6853 
   6854 		mutex_enter(txq->txq_lock);
   6855 		wm_init_tx_queue(sc, wmq, txq);
   6856 		mutex_exit(txq->txq_lock);
   6857 
   6858 		mutex_enter(rxq->rxq_lock);
   6859 		error = wm_init_rx_queue(sc, wmq, rxq);
   6860 		mutex_exit(rxq->rxq_lock);
   6861 		if (error)
   6862 			break;
   6863 	}
   6864 
   6865 	return error;
   6866 }
   6867 
   6868 /*
   6869  * wm_tx_offload:
   6870  *
   6871  *	Set up TCP/IP checksumming parameters for the
   6872  *	specified packet.
   6873  */
   6874 static int
   6875 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6876     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6877 {
   6878 	struct mbuf *m0 = txs->txs_mbuf;
   6879 	struct livengood_tcpip_ctxdesc *t;
   6880 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6881 	uint32_t ipcse;
   6882 	struct ether_header *eh;
   6883 	int offset, iphl;
   6884 	uint8_t fields;
   6885 
   6886 	/*
   6887 	 * XXX It would be nice if the mbuf pkthdr had offset
   6888 	 * fields for the protocol headers.
   6889 	 */
   6890 
   6891 	eh = mtod(m0, struct ether_header *);
   6892 	switch (htons(eh->ether_type)) {
   6893 	case ETHERTYPE_IP:
   6894 	case ETHERTYPE_IPV6:
   6895 		offset = ETHER_HDR_LEN;
   6896 		break;
   6897 
   6898 	case ETHERTYPE_VLAN:
   6899 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6900 		break;
   6901 
   6902 	default:
    6903 		/*
    6904 		 * We don't support this protocol or encapsulation.
    6905 		 */
   6906 		*fieldsp = 0;
   6907 		*cmdp = 0;
   6908 		return 0;
   6909 	}
   6910 
   6911 	if ((m0->m_pkthdr.csum_flags &
   6912 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6913 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6914 	} else {
   6915 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6916 	}
   6917 	ipcse = offset + iphl - 1;
   6918 
   6919 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6920 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6921 	seg = 0;
   6922 	fields = 0;
   6923 
   6924 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6925 		int hlen = offset + iphl;
   6926 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6927 
   6928 		if (__predict_false(m0->m_len <
   6929 				    (hlen + sizeof(struct tcphdr)))) {
   6930 			/*
   6931 			 * TCP/IP headers are not in the first mbuf; we need
   6932 			 * to do this the slow and painful way.  Let's just
   6933 			 * hope this doesn't happen very often.
   6934 			 */
   6935 			struct tcphdr th;
   6936 
   6937 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6938 
   6939 			m_copydata(m0, hlen, sizeof(th), &th);
   6940 			if (v4) {
   6941 				struct ip ip;
   6942 
   6943 				m_copydata(m0, offset, sizeof(ip), &ip);
   6944 				ip.ip_len = 0;
   6945 				m_copyback(m0,
   6946 				    offset + offsetof(struct ip, ip_len),
   6947 				    sizeof(ip.ip_len), &ip.ip_len);
   6948 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6949 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6950 			} else {
   6951 				struct ip6_hdr ip6;
   6952 
   6953 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6954 				ip6.ip6_plen = 0;
   6955 				m_copyback(m0,
   6956 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6957 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6958 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6959 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6960 			}
   6961 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6962 			    sizeof(th.th_sum), &th.th_sum);
   6963 
   6964 			hlen += th.th_off << 2;
   6965 		} else {
   6966 			/*
   6967 			 * TCP/IP headers are in the first mbuf; we can do
   6968 			 * this the easy way.
   6969 			 */
   6970 			struct tcphdr *th;
   6971 
   6972 			if (v4) {
   6973 				struct ip *ip =
   6974 				    (void *)(mtod(m0, char *) + offset);
   6975 				th = (void *)(mtod(m0, char *) + hlen);
   6976 
   6977 				ip->ip_len = 0;
   6978 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6979 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6980 			} else {
   6981 				struct ip6_hdr *ip6 =
   6982 				    (void *)(mtod(m0, char *) + offset);
   6983 				th = (void *)(mtod(m0, char *) + hlen);
   6984 
   6985 				ip6->ip6_plen = 0;
   6986 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6987 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6988 			}
   6989 			hlen += th->th_off << 2;
   6990 		}
   6991 
   6992 		if (v4) {
   6993 			WM_Q_EVCNT_INCR(txq, txtso);
   6994 			cmdlen |= WTX_TCPIP_CMD_IP;
   6995 		} else {
   6996 			WM_Q_EVCNT_INCR(txq, txtso6);
   6997 			ipcse = 0;
   6998 		}
   6999 		cmd |= WTX_TCPIP_CMD_TSE;
   7000 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7001 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7002 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7003 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7004 	}
   7005 
   7006 	/*
   7007 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7008 	 * offload feature, if we load the context descriptor, we
   7009 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7010 	 */
   7011 
   7012 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7013 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7014 	    WTX_TCPIP_IPCSE(ipcse);
   7015 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7016 		WM_Q_EVCNT_INCR(txq, txipsum);
   7017 		fields |= WTX_IXSM;
   7018 	}
   7019 
   7020 	offset += iphl;
   7021 
   7022 	if (m0->m_pkthdr.csum_flags &
   7023 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7024 		WM_Q_EVCNT_INCR(txq, txtusum);
   7025 		fields |= WTX_TXSM;
   7026 		tucs = WTX_TCPIP_TUCSS(offset) |
   7027 		    WTX_TCPIP_TUCSO(offset +
   7028 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7029 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7030 	} else if ((m0->m_pkthdr.csum_flags &
   7031 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7032 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7033 		fields |= WTX_TXSM;
   7034 		tucs = WTX_TCPIP_TUCSS(offset) |
   7035 		    WTX_TCPIP_TUCSO(offset +
   7036 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7037 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7038 	} else {
   7039 		/* Just initialize it to a valid TCP context. */
   7040 		tucs = WTX_TCPIP_TUCSS(offset) |
   7041 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7042 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7043 	}
   7044 
    7045 	/*
    7046 	 * We don't have to write a context descriptor for every packet,
    7047 	 * except on the 82574: the 82574 requires a context descriptor
    7048 	 * for every packet when two descriptor queues are used.
    7049 	 * Writing a context descriptor for every packet would add
    7050 	 * overhead, but it does not cause problems.
    7051 	 */
   7052 	/* Fill in the context descriptor. */
   7053 	t = (struct livengood_tcpip_ctxdesc *)
   7054 	    &txq->txq_descs[txq->txq_next];
   7055 	t->tcpip_ipcs = htole32(ipcs);
   7056 	t->tcpip_tucs = htole32(tucs);
   7057 	t->tcpip_cmdlen = htole32(cmdlen);
   7058 	t->tcpip_seg = htole32(seg);
   7059 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7060 
   7061 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7062 	txs->txs_ndesc++;
   7063 
   7064 	*cmdp = cmd;
   7065 	*fieldsp = fields;
   7066 
   7067 	return 0;
   7068 }
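
/*
 * Illustrative sketch (not compiled into the driver): for TSO, the
 * pseudo-header checksum stored in th_sum above deliberately omits the
 * length (the length fields were zeroed first; the hardware accounts
 * for the per-segment length).  Below is a standalone IPv4
 * pseudo-header partial sum with one's-complement folding; it is a
 * hypothetical stand-in for the kernel's in_cksum_phdr(), not its
 * implementation.
 */
#if 0
#include <stdint.h>

static uint16_t
pseudo_hdr_sum(uint32_t src, uint32_t dst, uint32_t lenproto)
{
	uint64_t sum;

	/* All inputs are expected in network byte order. */
	sum = (src >> 16) + (src & 0xffff)
	    + (dst >> 16) + (dst & 0xffff)
	    + (lenproto >> 16) + (lenproto & 0xffff);
	/* Fold the carries back into 16 bits. */
	while (sum > 0xffff)
		sum = (sum >> 16) + (sum & 0xffff);
	return (uint16_t)sum;
}
#endif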
   7069 
   7070 static inline int
   7071 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7072 {
   7073 	struct wm_softc *sc = ifp->if_softc;
   7074 	u_int cpuid = cpu_index(curcpu());
   7075 
    7076 	/*
    7077 	 * Currently, a simple distribution strategy.
    7078 	 * TODO:
    7079 	 * Distribute by flowid (RSS hash value).
    7080 	 */
    7081 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7082 }
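
/*
 * Illustrative sketch (not compiled into the driver): the mapping above
 * spreads CPUs over queues with a modulo; adding ncpu keeps the
 * dividend non-negative when the affinity offset exceeds the CPU index.
 */
#if 0
static int
cpu_to_queue(int cpuid, int ncpu, int offset, int nqueues)
{

	/* Bias by ncpu so the dividend cannot go negative. */
	return (cpuid + ncpu - offset) % nqueues;
}
#endif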
   7083 
   7084 /*
   7085  * wm_start:		[ifnet interface function]
   7086  *
   7087  *	Start packet transmission on the interface.
   7088  */
   7089 static void
   7090 wm_start(struct ifnet *ifp)
   7091 {
   7092 	struct wm_softc *sc = ifp->if_softc;
   7093 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7094 
   7095 #ifdef WM_MPSAFE
   7096 	KASSERT(if_is_mpsafe(ifp));
   7097 #endif
   7098 	/*
   7099 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7100 	 */
   7101 
   7102 	mutex_enter(txq->txq_lock);
   7103 	if (!txq->txq_stopping)
   7104 		wm_start_locked(ifp);
   7105 	mutex_exit(txq->txq_lock);
   7106 }
   7107 
   7108 static void
   7109 wm_start_locked(struct ifnet *ifp)
   7110 {
   7111 	struct wm_softc *sc = ifp->if_softc;
   7112 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7113 
   7114 	wm_send_common_locked(ifp, txq, false);
   7115 }
   7116 
   7117 static int
   7118 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7119 {
   7120 	int qid;
   7121 	struct wm_softc *sc = ifp->if_softc;
   7122 	struct wm_txqueue *txq;
   7123 
   7124 	qid = wm_select_txqueue(ifp, m);
   7125 	txq = &sc->sc_queue[qid].wmq_txq;
   7126 
   7127 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7128 		m_freem(m);
   7129 		WM_Q_EVCNT_INCR(txq, txdrop);
   7130 		return ENOBUFS;
   7131 	}
   7132 
   7133 	/*
   7134 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7135 	 */
   7136 	ifp->if_obytes += m->m_pkthdr.len;
   7137 	if (m->m_flags & M_MCAST)
   7138 		ifp->if_omcasts++;
   7139 
   7140 	if (mutex_tryenter(txq->txq_lock)) {
   7141 		if (!txq->txq_stopping)
   7142 			wm_transmit_locked(ifp, txq);
   7143 		mutex_exit(txq->txq_lock);
   7144 	}
   7145 
   7146 	return 0;
   7147 }
   7148 
   7149 static void
   7150 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7151 {
   7152 
   7153 	wm_send_common_locked(ifp, txq, true);
   7154 }
   7155 
   7156 static void
   7157 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7158     bool is_transmit)
   7159 {
   7160 	struct wm_softc *sc = ifp->if_softc;
   7161 	struct mbuf *m0;
   7162 	struct wm_txsoft *txs;
   7163 	bus_dmamap_t dmamap;
   7164 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7165 	bus_addr_t curaddr;
   7166 	bus_size_t seglen, curlen;
   7167 	uint32_t cksumcmd;
   7168 	uint8_t cksumfields;
   7169 
   7170 	KASSERT(mutex_owned(txq->txq_lock));
   7171 
   7172 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7173 		return;
   7174 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7175 		return;
   7176 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7177 		return;
   7178 
   7179 	/* Remember the previous number of free descriptors. */
   7180 	ofree = txq->txq_free;
   7181 
   7182 	/*
   7183 	 * Loop through the send queue, setting up transmit descriptors
   7184 	 * until we drain the queue, or use up all available transmit
   7185 	 * descriptors.
   7186 	 */
   7187 	for (;;) {
   7188 		m0 = NULL;
   7189 
   7190 		/* Get a work queue entry. */
   7191 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7192 			wm_txeof(txq, UINT_MAX);
   7193 			if (txq->txq_sfree == 0) {
   7194 				DPRINTF(WM_DEBUG_TX,
   7195 				    ("%s: TX: no free job descriptors\n",
   7196 					device_xname(sc->sc_dev)));
   7197 				WM_Q_EVCNT_INCR(txq, txsstall);
   7198 				break;
   7199 			}
   7200 		}
   7201 
   7202 		/* Grab a packet off the queue. */
   7203 		if (is_transmit)
   7204 			m0 = pcq_get(txq->txq_interq);
   7205 		else
   7206 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7207 		if (m0 == NULL)
   7208 			break;
   7209 
   7210 		DPRINTF(WM_DEBUG_TX,
   7211 		    ("%s: TX: have packet to transmit: %p\n",
   7212 		    device_xname(sc->sc_dev), m0));
   7213 
   7214 		txs = &txq->txq_soft[txq->txq_snext];
   7215 		dmamap = txs->txs_dmamap;
   7216 
   7217 		use_tso = (m0->m_pkthdr.csum_flags &
   7218 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7219 
   7220 		/*
   7221 		 * So says the Linux driver:
   7222 		 * The controller does a simple calculation to make sure
   7223 		 * there is enough room in the FIFO before initiating the
   7224 		 * DMA for each buffer.  The calc is:
   7225 		 *	4 = ceil(buffer len / MSS)
   7226 		 * To make sure we don't overrun the FIFO, adjust the max
   7227 		 * buffer len if the MSS drops.
   7228 		 */
   7229 		dmamap->dm_maxsegsz =
   7230 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7231 		    ? m0->m_pkthdr.segsz << 2
   7232 		    : WTX_MAX_LEN;
   7233 
   7234 		/*
   7235 		 * Load the DMA map.  If this fails, the packet either
   7236 		 * didn't fit in the allotted number of segments, or we
   7237 		 * were short on resources.  For the too-many-segments
   7238 		 * case, we simply report an error and drop the packet,
   7239 		 * since we can't sanely copy a jumbo packet to a single
   7240 		 * buffer.
   7241 		 */
   7242 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7243 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7244 		if (error) {
   7245 			if (error == EFBIG) {
   7246 				WM_Q_EVCNT_INCR(txq, txdrop);
   7247 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7248 				    "DMA segments, dropping...\n",
   7249 				    device_xname(sc->sc_dev));
   7250 				wm_dump_mbuf_chain(sc, m0);
   7251 				m_freem(m0);
   7252 				continue;
   7253 			}
   7254 			/*  Short on resources, just stop for now. */
   7255 			DPRINTF(WM_DEBUG_TX,
   7256 			    ("%s: TX: dmamap load failed: %d\n",
   7257 			    device_xname(sc->sc_dev), error));
   7258 			break;
   7259 		}
   7260 
   7261 		segs_needed = dmamap->dm_nsegs;
   7262 		if (use_tso) {
   7263 			/* For sentinel descriptor; see below. */
   7264 			segs_needed++;
   7265 		}
   7266 
   7267 		/*
   7268 		 * Ensure we have enough descriptors free to describe
   7269 		 * the packet.  Note, we always reserve one descriptor
   7270 		 * at the end of the ring due to the semantics of the
   7271 		 * TDT register, plus one more in the event we need
   7272 		 * to load offload context.
   7273 		 */
   7274 		if (segs_needed > txq->txq_free - 2) {
   7275 			/*
   7276 			 * Not enough free descriptors to transmit this
   7277 			 * packet.  We haven't committed anything yet,
    7278 			 * so just unload the DMA map, put the packet
    7279 			 * back on the queue, and punt.  Notify the upper
   7280 			 * layer that there are no more slots left.
   7281 			 */
   7282 			DPRINTF(WM_DEBUG_TX,
   7283 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7284 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7285 			    segs_needed, txq->txq_free - 1));
   7286 			if (!is_transmit)
   7287 				ifp->if_flags |= IFF_OACTIVE;
   7288 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7289 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7290 			WM_Q_EVCNT_INCR(txq, txdstall);
   7291 			break;
   7292 		}
   7293 
   7294 		/*
   7295 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7296 		 * once we know we can transmit the packet, since we
   7297 		 * do some internal FIFO space accounting here.
   7298 		 */
   7299 		if (sc->sc_type == WM_T_82547 &&
   7300 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7301 			DPRINTF(WM_DEBUG_TX,
   7302 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7303 			    device_xname(sc->sc_dev)));
   7304 			if (!is_transmit)
   7305 				ifp->if_flags |= IFF_OACTIVE;
   7306 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7307 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7308 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7309 			break;
   7310 		}
   7311 
   7312 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7313 
   7314 		DPRINTF(WM_DEBUG_TX,
   7315 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7316 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7317 
   7318 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7319 
   7320 		/*
   7321 		 * Store a pointer to the packet so that we can free it
   7322 		 * later.
   7323 		 *
    7324 		 * Initially, we consider the number of descriptors the
    7325 		 * packet uses to be the number of DMA segments.  This may
    7326 		 * be incremented by 1 if we do checksum offload (a
    7327 		 * descriptor is used to set the checksum context).
   7328 		 */
   7329 		txs->txs_mbuf = m0;
   7330 		txs->txs_firstdesc = txq->txq_next;
   7331 		txs->txs_ndesc = segs_needed;
   7332 
   7333 		/* Set up offload parameters for this packet. */
   7334 		if (m0->m_pkthdr.csum_flags &
   7335 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7336 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7337 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7338 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7339 					  &cksumfields) != 0) {
   7340 				/* Error message already displayed. */
   7341 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7342 				continue;
   7343 			}
   7344 		} else {
   7345 			cksumcmd = 0;
   7346 			cksumfields = 0;
   7347 		}
   7348 
   7349 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7350 
   7351 		/* Sync the DMA map. */
   7352 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7353 		    BUS_DMASYNC_PREWRITE);
   7354 
   7355 		/* Initialize the transmit descriptor. */
   7356 		for (nexttx = txq->txq_next, seg = 0;
   7357 		     seg < dmamap->dm_nsegs; seg++) {
   7358 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7359 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7360 			     seglen != 0;
   7361 			     curaddr += curlen, seglen -= curlen,
   7362 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7363 				curlen = seglen;
   7364 
   7365 				/*
   7366 				 * So says the Linux driver:
   7367 				 * Work around for premature descriptor
   7368 				 * write-backs in TSO mode.  Append a
   7369 				 * 4-byte sentinel descriptor.
   7370 				 */
   7371 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7372 				    curlen > 8)
   7373 					curlen -= 4;
   7374 
   7375 				wm_set_dma_addr(
   7376 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7377 				txq->txq_descs[nexttx].wtx_cmdlen
   7378 				    = htole32(cksumcmd | curlen);
   7379 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7380 				    = 0;
   7381 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7382 				    = cksumfields;
    7383 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7384 				lasttx = nexttx;
   7385 
   7386 				DPRINTF(WM_DEBUG_TX,
   7387 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7388 				     "len %#04zx\n",
   7389 				    device_xname(sc->sc_dev), nexttx,
   7390 				    (uint64_t)curaddr, curlen));
   7391 			}
   7392 		}
   7393 
   7394 		KASSERT(lasttx != -1);
   7395 
   7396 		/*
   7397 		 * Set up the command byte on the last descriptor of
   7398 		 * the packet.  If we're in the interrupt delay window,
   7399 		 * delay the interrupt.
   7400 		 */
   7401 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7402 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7403 
   7404 		/*
   7405 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7406 		 * up the descriptor to encapsulate the packet for us.
   7407 		 *
   7408 		 * This is only valid on the last descriptor of the packet.
   7409 		 */
   7410 		if (vlan_has_tag(m0)) {
   7411 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7412 			    htole32(WTX_CMD_VLE);
   7413 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7414 			    = htole16(vlan_get_tag(m0));
   7415 		}
   7416 
   7417 		txs->txs_lastdesc = lasttx;
   7418 
   7419 		DPRINTF(WM_DEBUG_TX,
   7420 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7421 		    device_xname(sc->sc_dev),
   7422 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7423 
   7424 		/* Sync the descriptors we're using. */
   7425 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7426 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7427 
   7428 		/* Give the packet to the chip. */
   7429 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7430 
   7431 		DPRINTF(WM_DEBUG_TX,
   7432 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7433 
   7434 		DPRINTF(WM_DEBUG_TX,
   7435 		    ("%s: TX: finished transmitting packet, job %d\n",
   7436 		    device_xname(sc->sc_dev), txq->txq_snext));
   7437 
   7438 		/* Advance the tx pointer. */
   7439 		txq->txq_free -= txs->txs_ndesc;
   7440 		txq->txq_next = nexttx;
   7441 
   7442 		txq->txq_sfree--;
   7443 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7444 
   7445 		/* Pass the packet to any BPF listeners. */
   7446 		bpf_mtap(ifp, m0);
   7447 	}
   7448 
   7449 	if (m0 != NULL) {
   7450 		if (!is_transmit)
   7451 			ifp->if_flags |= IFF_OACTIVE;
   7452 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7453 		WM_Q_EVCNT_INCR(txq, txdrop);
   7454 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7455 			__func__));
   7456 		m_freem(m0);
   7457 	}
   7458 
   7459 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7460 		/* No more slots; notify upper layer. */
   7461 		if (!is_transmit)
   7462 			ifp->if_flags |= IFF_OACTIVE;
   7463 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7464 	}
   7465 
   7466 	if (txq->txq_free != ofree) {
   7467 		/* Set a watchdog timer in case the chip flakes out. */
   7468 		txq->txq_lastsent = time_uptime;
   7469 		txq->txq_watchdog = true;
   7470 	}
   7471 }
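
/*
 * Illustrative sketch (not compiled into the driver): per the
 * Linux-derived comment above, the controller assumes a buffer spans at
 * most 4 FIFO chunks (4 = ceil(buffer len / MSS)), so with TSO each DMA
 * segment is clamped to 4 * MSS; otherwise the ordinary per-descriptor
 * limit (WTX_MAX_LEN) applies.
 */
#if 0
#include <stdbool.h>

static unsigned
tso_maxsegsz(bool use_tso, unsigned mss, unsigned hard_max)
{
	unsigned lim = mss << 2;	/* 4 * MSS */

	return (use_tso && lim < hard_max) ? lim : hard_max;
}
#endif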
   7472 
   7473 /*
   7474  * wm_nq_tx_offload:
   7475  *
   7476  *	Set up TCP/IP checksumming parameters for the
   7477  *	specified packet, for NEWQUEUE devices
   7478  */
   7479 static int
   7480 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7481     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7482 {
   7483 	struct mbuf *m0 = txs->txs_mbuf;
   7484 	uint32_t vl_len, mssidx, cmdc;
   7485 	struct ether_header *eh;
   7486 	int offset, iphl;
   7487 
   7488 	/*
   7489 	 * XXX It would be nice if the mbuf pkthdr had offset
   7490 	 * fields for the protocol headers.
   7491 	 */
   7492 	*cmdlenp = 0;
   7493 	*fieldsp = 0;
   7494 
   7495 	eh = mtod(m0, struct ether_header *);
   7496 	switch (htons(eh->ether_type)) {
   7497 	case ETHERTYPE_IP:
   7498 	case ETHERTYPE_IPV6:
   7499 		offset = ETHER_HDR_LEN;
   7500 		break;
   7501 
   7502 	case ETHERTYPE_VLAN:
   7503 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7504 		break;
   7505 
   7506 	default:
    7507 		/* We don't support this protocol or encapsulation. */
   7508 		*do_csum = false;
   7509 		return 0;
   7510 	}
   7511 	*do_csum = true;
   7512 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7513 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7514 
   7515 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7516 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7517 
   7518 	if ((m0->m_pkthdr.csum_flags &
   7519 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7520 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7521 	} else {
   7522 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7523 	}
   7524 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7525 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7526 
   7527 	if (vlan_has_tag(m0)) {
   7528 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7529 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7530 		*cmdlenp |= NQTX_CMD_VLE;
   7531 	}
   7532 
   7533 	mssidx = 0;
   7534 
   7535 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7536 		int hlen = offset + iphl;
   7537 		int tcp_hlen;
   7538 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7539 
   7540 		if (__predict_false(m0->m_len <
   7541 				    (hlen + sizeof(struct tcphdr)))) {
   7542 			/*
   7543 			 * TCP/IP headers are not in the first mbuf; we need
   7544 			 * to do this the slow and painful way.  Let's just
   7545 			 * hope this doesn't happen very often.
   7546 			 */
   7547 			struct tcphdr th;
   7548 
   7549 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7550 
   7551 			m_copydata(m0, hlen, sizeof(th), &th);
   7552 			if (v4) {
   7553 				struct ip ip;
   7554 
   7555 				m_copydata(m0, offset, sizeof(ip), &ip);
   7556 				ip.ip_len = 0;
   7557 				m_copyback(m0,
   7558 				    offset + offsetof(struct ip, ip_len),
   7559 				    sizeof(ip.ip_len), &ip.ip_len);
   7560 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7561 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7562 			} else {
   7563 				struct ip6_hdr ip6;
   7564 
   7565 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7566 				ip6.ip6_plen = 0;
   7567 				m_copyback(m0,
   7568 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7569 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7570 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7571 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7572 			}
   7573 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7574 			    sizeof(th.th_sum), &th.th_sum);
   7575 
   7576 			tcp_hlen = th.th_off << 2;
   7577 		} else {
   7578 			/*
   7579 			 * TCP/IP headers are in the first mbuf; we can do
   7580 			 * this the easy way.
   7581 			 */
   7582 			struct tcphdr *th;
   7583 
   7584 			if (v4) {
   7585 				struct ip *ip =
   7586 				    (void *)(mtod(m0, char *) + offset);
   7587 				th = (void *)(mtod(m0, char *) + hlen);
   7588 
   7589 				ip->ip_len = 0;
   7590 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7591 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7592 			} else {
   7593 				struct ip6_hdr *ip6 =
   7594 				    (void *)(mtod(m0, char *) + offset);
   7595 				th = (void *)(mtod(m0, char *) + hlen);
   7596 
   7597 				ip6->ip6_plen = 0;
   7598 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7599 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7600 			}
   7601 			tcp_hlen = th->th_off << 2;
   7602 		}
   7603 		hlen += tcp_hlen;
   7604 		*cmdlenp |= NQTX_CMD_TSE;
   7605 
   7606 		if (v4) {
   7607 			WM_Q_EVCNT_INCR(txq, txtso);
   7608 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7609 		} else {
   7610 			WM_Q_EVCNT_INCR(txq, txtso6);
   7611 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7612 		}
   7613 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7614 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7615 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7616 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7617 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7618 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7619 	} else {
   7620 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7621 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7622 	}
   7623 
   7624 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7625 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7626 		cmdc |= NQTXC_CMD_IP4;
   7627 	}
   7628 
   7629 	if (m0->m_pkthdr.csum_flags &
   7630 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7631 		WM_Q_EVCNT_INCR(txq, txtusum);
   7632 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7633 			cmdc |= NQTXC_CMD_TCP;
   7634 		} else {
   7635 			cmdc |= NQTXC_CMD_UDP;
   7636 		}
   7637 		cmdc |= NQTXC_CMD_IP4;
   7638 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7639 	}
   7640 	if (m0->m_pkthdr.csum_flags &
   7641 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7642 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7643 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7644 			cmdc |= NQTXC_CMD_TCP;
   7645 		} else {
   7646 			cmdc |= NQTXC_CMD_UDP;
   7647 		}
   7648 		cmdc |= NQTXC_CMD_IP6;
   7649 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7650 	}
   7651 
    7652 	/*
    7653 	 * We don't have to write a context descriptor for every packet on
    7654 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7655 	 * I210 and I211.  Writing once per Tx queue is enough for these
    7656 	 * controllers.
    7657 	 * Writing a context descriptor for every packet would add
    7658 	 * overhead, but it does not cause problems.
    7659 	 */
   7660 	/* Fill in the context descriptor. */
   7661 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7662 	    htole32(vl_len);
   7663 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7664 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7665 	    htole32(cmdc);
   7666 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7667 	    htole32(mssidx);
   7668 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7669 	DPRINTF(WM_DEBUG_TX,
   7670 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7671 	    txq->txq_next, 0, vl_len));
   7672 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7673 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7674 	txs->txs_ndesc++;
   7675 	return 0;
   7676 }
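
/*
 * Illustrative sketch (not compiled into the driver): the context
 * descriptor above packs several values into 32-bit words with
 * shift/mask pairs, and each KASSERT checks that a value fits its field
 * before it is shifted into place.  The field constants below are
 * hypothetical.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define	EX_FIELD_SHIFT	9
#define	EX_FIELD_MASK	0x7f		/* a 7-bit field */

static uint32_t
pack_field(uint32_t word, uint32_t val)
{

	/* The value must fit the field, or the shift would corrupt it. */
	assert((val & ~(uint32_t)EX_FIELD_MASK) == 0);
	return word | (val << EX_FIELD_SHIFT);
}
#endif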
   7677 
   7678 /*
   7679  * wm_nq_start:		[ifnet interface function]
   7680  *
   7681  *	Start packet transmission on the interface for NEWQUEUE devices
   7682  */
   7683 static void
   7684 wm_nq_start(struct ifnet *ifp)
   7685 {
   7686 	struct wm_softc *sc = ifp->if_softc;
   7687 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7688 
   7689 #ifdef WM_MPSAFE
   7690 	KASSERT(if_is_mpsafe(ifp));
   7691 #endif
   7692 	/*
   7693 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7694 	 */
   7695 
   7696 	mutex_enter(txq->txq_lock);
   7697 	if (!txq->txq_stopping)
   7698 		wm_nq_start_locked(ifp);
   7699 	mutex_exit(txq->txq_lock);
   7700 }
   7701 
   7702 static void
   7703 wm_nq_start_locked(struct ifnet *ifp)
   7704 {
   7705 	struct wm_softc *sc = ifp->if_softc;
   7706 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7707 
   7708 	wm_nq_send_common_locked(ifp, txq, false);
   7709 }
   7710 
   7711 static int
   7712 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7713 {
   7714 	int qid;
   7715 	struct wm_softc *sc = ifp->if_softc;
   7716 	struct wm_txqueue *txq;
   7717 
   7718 	qid = wm_select_txqueue(ifp, m);
   7719 	txq = &sc->sc_queue[qid].wmq_txq;
   7720 
   7721 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7722 		m_freem(m);
   7723 		WM_Q_EVCNT_INCR(txq, txdrop);
   7724 		return ENOBUFS;
   7725 	}
   7726 
   7727 	/*
   7728 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7729 	 */
   7730 	ifp->if_obytes += m->m_pkthdr.len;
   7731 	if (m->m_flags & M_MCAST)
   7732 		ifp->if_omcasts++;
   7733 
    7734 	/*
    7735 	 * There are two situations in which this mutex_tryenter() can fail:
    7736 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7737 	 *     (2) contention with the deferred if_start softint
    7738 	 *         (wm_handle_queue())
    7739 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7740 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7741 	 * In case (2), the last packet enqueued to txq->txq_interq is
    7742 	 * also dequeued by wm_deferred_start_locked(), so it does not get
    7743 	 * stuck, either.
    7744 	 */
   7745 	if (mutex_tryenter(txq->txq_lock)) {
   7746 		if (!txq->txq_stopping)
   7747 			wm_nq_transmit_locked(ifp, txq);
   7748 		mutex_exit(txq->txq_lock);
   7749 	}
   7750 
   7751 	return 0;
   7752 }
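
/*
 * Illustrative sketch (not compiled into the driver): the
 * enqueue-then-trylock pattern above is safe because a failed trylock
 * means another context holds the lock and will drain txq_interq
 * itself.  Below is a generic userland rendering with pthreads;
 * everything here is hypothetical.
 */
#if 0
#include <pthread.h>

struct workq {
	pthread_mutex_t lock;
	int pending;		/* stand-in for the lock-free pcq */
};

/* Process everything pending; the caller holds wq->lock. */
static void
drain_locked(struct workq *wq)
{

	__atomic_store_n(&wq->pending, 0, __ATOMIC_RELAXED);
}

static void
enqueue_and_kick(struct workq *wq)
{

	__atomic_add_fetch(&wq->pending, 1, __ATOMIC_RELAXED);

	/*
	 * If the trylock fails, the holder is in (or headed for)
	 * drain_locked(), so the new work cannot be stranded.
	 */
	if (pthread_mutex_trylock(&wq->lock) == 0) {
		drain_locked(wq);
		pthread_mutex_unlock(&wq->lock);
	}
}
#endif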
   7753 
   7754 static void
   7755 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7756 {
   7757 
   7758 	wm_nq_send_common_locked(ifp, txq, true);
   7759 }
   7760 
   7761 static void
   7762 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7763     bool is_transmit)
   7764 {
   7765 	struct wm_softc *sc = ifp->if_softc;
   7766 	struct mbuf *m0;
   7767 	struct wm_txsoft *txs;
   7768 	bus_dmamap_t dmamap;
   7769 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7770 	bool do_csum, sent;
   7771 
   7772 	KASSERT(mutex_owned(txq->txq_lock));
   7773 
   7774 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7775 		return;
   7776 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7777 		return;
   7778 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7779 		return;
   7780 
   7781 	sent = false;
   7782 
   7783 	/*
   7784 	 * Loop through the send queue, setting up transmit descriptors
   7785 	 * until we drain the queue, or use up all available transmit
   7786 	 * descriptors.
   7787 	 */
   7788 	for (;;) {
   7789 		m0 = NULL;
   7790 
   7791 		/* Get a work queue entry. */
   7792 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7793 			wm_txeof(txq, UINT_MAX);
   7794 			if (txq->txq_sfree == 0) {
   7795 				DPRINTF(WM_DEBUG_TX,
   7796 				    ("%s: TX: no free job descriptors\n",
   7797 					device_xname(sc->sc_dev)));
   7798 				WM_Q_EVCNT_INCR(txq, txsstall);
   7799 				break;
   7800 			}
   7801 		}
   7802 
   7803 		/* Grab a packet off the queue. */
   7804 		if (is_transmit)
   7805 			m0 = pcq_get(txq->txq_interq);
   7806 		else
   7807 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7808 		if (m0 == NULL)
   7809 			break;
   7810 
   7811 		DPRINTF(WM_DEBUG_TX,
   7812 		    ("%s: TX: have packet to transmit: %p\n",
   7813 		    device_xname(sc->sc_dev), m0));
   7814 
   7815 		txs = &txq->txq_soft[txq->txq_snext];
   7816 		dmamap = txs->txs_dmamap;
   7817 
   7818 		/*
   7819 		 * Load the DMA map.  If this fails, the packet either
   7820 		 * didn't fit in the allotted number of segments, or we
   7821 		 * were short on resources.  For the too-many-segments
   7822 		 * case, we simply report an error and drop the packet,
   7823 		 * since we can't sanely copy a jumbo packet to a single
   7824 		 * buffer.
   7825 		 */
   7826 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7827 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7828 		if (error) {
   7829 			if (error == EFBIG) {
   7830 				WM_Q_EVCNT_INCR(txq, txdrop);
   7831 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7832 				    "DMA segments, dropping...\n",
   7833 				    device_xname(sc->sc_dev));
   7834 				wm_dump_mbuf_chain(sc, m0);
   7835 				m_freem(m0);
   7836 				continue;
   7837 			}
   7838 			/* Short on resources, just stop for now. */
   7839 			DPRINTF(WM_DEBUG_TX,
   7840 			    ("%s: TX: dmamap load failed: %d\n",
   7841 			    device_xname(sc->sc_dev), error));
   7842 			break;
   7843 		}
   7844 
   7845 		segs_needed = dmamap->dm_nsegs;
   7846 
   7847 		/*
   7848 		 * Ensure we have enough descriptors free to describe
   7849 		 * the packet.  Note, we always reserve one descriptor
   7850 		 * at the end of the ring due to the semantics of the
   7851 		 * TDT register, plus one more in the event we need
   7852 		 * to load offload context.
   7853 		 */
   7854 		if (segs_needed > txq->txq_free - 2) {
   7855 			/*
   7856 			 * Not enough free descriptors to transmit this
   7857 			 * packet.  We haven't committed anything yet,
    7858 			 * so just unload the DMA map, put the packet
    7859 			 * back on the queue, and punt.  Notify the upper
   7860 			 * layer that there are no more slots left.
   7861 			 */
   7862 			DPRINTF(WM_DEBUG_TX,
   7863 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7864 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7865 			    segs_needed, txq->txq_free - 1));
   7866 			if (!is_transmit)
   7867 				ifp->if_flags |= IFF_OACTIVE;
   7868 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7869 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7870 			WM_Q_EVCNT_INCR(txq, txdstall);
   7871 			break;
   7872 		}
   7873 
   7874 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7875 
   7876 		DPRINTF(WM_DEBUG_TX,
   7877 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7878 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7879 
   7880 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7881 
   7882 		/*
   7883 		 * Store a pointer to the packet so that we can free it
   7884 		 * later.
   7885 		 *
    7886 		 * Initially, we consider the number of descriptors the
    7887 		 * packet uses to be the number of DMA segments.  This may be
   7888 		 * incremented by 1 if we do checksum offload (a descriptor
   7889 		 * is used to set the checksum context).
   7890 		 */
   7891 		txs->txs_mbuf = m0;
   7892 		txs->txs_firstdesc = txq->txq_next;
   7893 		txs->txs_ndesc = segs_needed;
   7894 
   7895 		/* Set up offload parameters for this packet. */
   7896 		uint32_t cmdlen, fields, dcmdlen;
   7897 		if (m0->m_pkthdr.csum_flags &
   7898 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7899 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7900 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7901 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7902 			    &do_csum) != 0) {
   7903 				/* Error message already displayed. */
   7904 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7905 				continue;
   7906 			}
   7907 		} else {
   7908 			do_csum = false;
   7909 			cmdlen = 0;
   7910 			fields = 0;
   7911 		}
   7912 
   7913 		/* Sync the DMA map. */
   7914 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7915 		    BUS_DMASYNC_PREWRITE);
   7916 
   7917 		/* Initialize the first transmit descriptor. */
   7918 		nexttx = txq->txq_next;
   7919 		if (!do_csum) {
   7920 			/* setup a legacy descriptor */
   7921 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7922 			    dmamap->dm_segs[0].ds_addr);
   7923 			txq->txq_descs[nexttx].wtx_cmdlen =
   7924 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7925 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7926 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7927 			if (vlan_has_tag(m0)) {
   7928 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7929 				    htole32(WTX_CMD_VLE);
   7930 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7931 				    htole16(vlan_get_tag(m0));
   7932 			} else {
    7933 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7934 			}
   7935 			dcmdlen = 0;
   7936 		} else {
   7937 			/* setup an advanced data descriptor */
   7938 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7939 			    htole64(dmamap->dm_segs[0].ds_addr);
   7940 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7941 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7942 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7943 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7944 			    htole32(fields);
   7945 			DPRINTF(WM_DEBUG_TX,
   7946 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7947 			    device_xname(sc->sc_dev), nexttx,
   7948 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7949 			DPRINTF(WM_DEBUG_TX,
   7950 			    ("\t 0x%08x%08x\n", fields,
   7951 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7952 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7953 		}
   7954 
   7955 		lasttx = nexttx;
   7956 		nexttx = WM_NEXTTX(txq, nexttx);
   7957 		/*
    7958 		 * Fill in the next descriptors.  The legacy and advanced
    7959 		 * formats are identical from here on.
   7960 		 */
   7961 		for (seg = 1; seg < dmamap->dm_nsegs;
   7962 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7963 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7964 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7965 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7966 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7967 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7968 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7969 			lasttx = nexttx;
   7970 
   7971 			DPRINTF(WM_DEBUG_TX,
   7972 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7973 			     "len %#04zx\n",
   7974 			    device_xname(sc->sc_dev), nexttx,
   7975 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7976 			    dmamap->dm_segs[seg].ds_len));
   7977 		}
   7978 
   7979 		KASSERT(lasttx != -1);
   7980 
   7981 		/*
   7982 		 * Set up the command byte on the last descriptor of
   7983 		 * the packet.  If we're in the interrupt delay window,
   7984 		 * delay the interrupt.
   7985 		 */
   7986 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7987 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7988 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7989 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7990 
   7991 		txs->txs_lastdesc = lasttx;
   7992 
   7993 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7994 		    device_xname(sc->sc_dev),
   7995 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7996 
   7997 		/* Sync the descriptors we're using. */
   7998 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7999 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8000 
   8001 		/* Give the packet to the chip. */
   8002 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8003 		sent = true;
   8004 
   8005 		DPRINTF(WM_DEBUG_TX,
   8006 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8007 
   8008 		DPRINTF(WM_DEBUG_TX,
   8009 		    ("%s: TX: finished transmitting packet, job %d\n",
   8010 		    device_xname(sc->sc_dev), txq->txq_snext));
   8011 
   8012 		/* Advance the tx pointer. */
   8013 		txq->txq_free -= txs->txs_ndesc;
   8014 		txq->txq_next = nexttx;
   8015 
   8016 		txq->txq_sfree--;
   8017 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8018 
   8019 		/* Pass the packet to any BPF listeners. */
   8020 		bpf_mtap(ifp, m0);
   8021 	}
   8022 
   8023 	if (m0 != NULL) {
   8024 		if (!is_transmit)
   8025 			ifp->if_flags |= IFF_OACTIVE;
   8026 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8027 		WM_Q_EVCNT_INCR(txq, txdrop);
   8028 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8029 			__func__));
   8030 		m_freem(m0);
   8031 	}
   8032 
   8033 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8034 		/* No more slots; notify upper layer. */
   8035 		if (!is_transmit)
   8036 			ifp->if_flags |= IFF_OACTIVE;
   8037 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8038 	}
   8039 
   8040 	if (sent) {
   8041 		/* Set a watchdog timer in case the chip flakes out. */
   8042 		txq->txq_lastsent = time_uptime;
   8043 		txq->txq_watchdog = true;
   8044 	}
   8045 }
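
/*
 * Illustrative note on the descriptor accounting above (names match the
 * driver; the snippet itself is a sketch, not compiled code):
 *
 *	usable = txq->txq_free - 2;
 *
 * One descriptor is always held back for the TDT register semantics and
 * one more for a possible offload context descriptor, so a packet is only
 * committed when dmamap->dm_nsegs fits in "usable".
 */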
   8046 
   8047 static void
   8048 wm_deferred_start_locked(struct wm_txqueue *txq)
   8049 {
   8050 	struct wm_softc *sc = txq->txq_sc;
   8051 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8052 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8053 	int qid = wmq->wmq_id;
   8054 
   8055 	KASSERT(mutex_owned(txq->txq_lock));
   8056 
   8057 	if (txq->txq_stopping) {
   8058 		mutex_exit(txq->txq_lock);
   8059 		return;
   8060 	}
   8061 
   8062 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8063 		/* XXX Needed for ALTQ or single-CPU systems */
   8064 		if (qid == 0)
   8065 			wm_nq_start_locked(ifp);
   8066 		wm_nq_transmit_locked(ifp, txq);
   8067 	} else {
    8068 		/* XXX Needed for ALTQ or single-CPU systems */
   8069 		if (qid == 0)
   8070 			wm_start_locked(ifp);
   8071 		wm_transmit_locked(ifp, txq);
   8072 	}
   8073 }
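
/*
 * Note: queue 0 is special.  It services the shared if_snd queue (needed
 * for ALTQ and single-CPU configurations, hence the "qid == 0" tests
 * above) in addition to its own pcq(9); every other queue only drains its
 * own pcq via the *_transmit_locked() path.
 */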
   8074 
   8075 /* Interrupt */
   8076 
   8077 /*
   8078  * wm_txeof:
   8079  *
   8080  *	Helper; handle transmit interrupts.
   8081  */
   8082 static bool
   8083 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8084 {
   8085 	struct wm_softc *sc = txq->txq_sc;
   8086 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8087 	struct wm_txsoft *txs;
   8088 	int count = 0;
   8089 	int i;
   8090 	uint8_t status;
   8091 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8092 	bool more = false;
   8093 
   8094 	KASSERT(mutex_owned(txq->txq_lock));
   8095 
   8096 	if (txq->txq_stopping)
   8097 		return false;
   8098 
   8099 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8100 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8101 	if (wmq->wmq_id == 0)
   8102 		ifp->if_flags &= ~IFF_OACTIVE;
   8103 
   8104 	/*
   8105 	 * Go through the Tx list and free mbufs for those
   8106 	 * frames which have been transmitted.
   8107 	 */
   8108 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8109 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8110 		if (limit-- == 0) {
   8111 			more = true;
   8112 			DPRINTF(WM_DEBUG_TX,
   8113 			    ("%s: TX: loop limited, job %d is not processed\n",
   8114 				device_xname(sc->sc_dev), i));
   8115 			break;
   8116 		}
   8117 
   8118 		txs = &txq->txq_soft[i];
   8119 
   8120 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8121 			device_xname(sc->sc_dev), i));
   8122 
   8123 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8124 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8125 
   8126 		status =
   8127 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8128 		if ((status & WTX_ST_DD) == 0) {
   8129 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8130 			    BUS_DMASYNC_PREREAD);
   8131 			break;
   8132 		}
   8133 
   8134 		count++;
   8135 		DPRINTF(WM_DEBUG_TX,
   8136 		    ("%s: TX: job %d done: descs %d..%d\n",
   8137 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8138 		    txs->txs_lastdesc));
   8139 
   8140 		/*
   8141 		 * XXX We should probably be using the statistics
   8142 		 * XXX registers, but I don't know if they exist
   8143 		 * XXX on chips before the i82544.
   8144 		 */
   8145 
   8146 #ifdef WM_EVENT_COUNTERS
   8147 		if (status & WTX_ST_TU)
   8148 			WM_Q_EVCNT_INCR(txq, tu);
   8149 #endif /* WM_EVENT_COUNTERS */
   8150 
   8151 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8152 			ifp->if_oerrors++;
   8153 			if (status & WTX_ST_LC)
   8154 				log(LOG_WARNING, "%s: late collision\n",
   8155 				    device_xname(sc->sc_dev));
   8156 			else if (status & WTX_ST_EC) {
   8157 				ifp->if_collisions += 16;
   8158 				log(LOG_WARNING, "%s: excessive collisions\n",
   8159 				    device_xname(sc->sc_dev));
   8160 			}
   8161 		} else
   8162 			ifp->if_opackets++;
   8163 
   8164 		txq->txq_packets++;
   8165 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8166 
   8167 		txq->txq_free += txs->txs_ndesc;
   8168 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8169 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8170 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8171 		m_freem(txs->txs_mbuf);
   8172 		txs->txs_mbuf = NULL;
   8173 	}
   8174 
   8175 	/* Update the dirty transmit buffer pointer. */
   8176 	txq->txq_sdirty = i;
   8177 	DPRINTF(WM_DEBUG_TX,
   8178 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8179 
   8180 	if (count != 0)
   8181 		rnd_add_uint32(&sc->rnd_source, count);
   8182 
   8183 	/*
   8184 	 * If there are no more pending transmissions, cancel the watchdog
   8185 	 * timer.
   8186 	 */
   8187 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8188 		txq->txq_watchdog = false;
   8189 
   8190 	return more;
   8191 }
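
/*
 * How the completion scan in wm_txeof() works, in short: WTX_CMD_RS is
 * set on the last descriptor of every job, asking the hardware to write
 * status back, so completion is detected by testing the DD bit on
 * txs->txs_lastdesc alone.  A minimal sketch of the check:
 *
 *	status = txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
 *	if ((status & WTX_ST_DD) == 0)
 *		break;	 // this job, and all later ones, still pending
 */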
   8192 
   8193 static inline uint32_t
   8194 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8195 {
   8196 	struct wm_softc *sc = rxq->rxq_sc;
   8197 
   8198 	if (sc->sc_type == WM_T_82574)
   8199 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8200 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8201 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8202 	else
   8203 		return rxq->rxq_descs[idx].wrx_status;
   8204 }
   8205 
   8206 static inline uint32_t
   8207 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8208 {
   8209 	struct wm_softc *sc = rxq->rxq_sc;
   8210 
   8211 	if (sc->sc_type == WM_T_82574)
   8212 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8213 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8214 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8215 	else
   8216 		return rxq->rxq_descs[idx].wrx_errors;
   8217 }
   8218 
   8219 static inline uint16_t
   8220 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8221 {
   8222 	struct wm_softc *sc = rxq->rxq_sc;
   8223 
   8224 	if (sc->sc_type == WM_T_82574)
   8225 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8226 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8227 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8228 	else
   8229 		return rxq->rxq_descs[idx].wrx_special;
   8230 }
   8231 
   8232 static inline int
   8233 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8234 {
   8235 	struct wm_softc *sc = rxq->rxq_sc;
   8236 
   8237 	if (sc->sc_type == WM_T_82574)
   8238 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8239 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8240 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8241 	else
   8242 		return rxq->rxq_descs[idx].wrx_len;
   8243 }
   8244 
   8245 #ifdef WM_DEBUG
   8246 static inline uint32_t
   8247 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8248 {
   8249 	struct wm_softc *sc = rxq->rxq_sc;
   8250 
   8251 	if (sc->sc_type == WM_T_82574)
   8252 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8253 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8254 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8255 	else
   8256 		return 0;
   8257 }
   8258 
   8259 static inline uint8_t
   8260 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8261 {
   8262 	struct wm_softc *sc = rxq->rxq_sc;
   8263 
   8264 	if (sc->sc_type == WM_T_82574)
   8265 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8266 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8267 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8268 	else
   8269 		return 0;
   8270 }
   8271 #endif /* WM_DEBUG */
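
/*
 * The wm_rxdesc_get_*() accessors above hide the three receive descriptor
 * layouts (legacy, 82574 extended, and "newqueue" advanced) behind one
 * interface, so a consumer such as wm_rxeof() can be written once, e.g.:
 *
 *	status = wm_rxdesc_get_status(rxq, i);
 *	errors = wm_rxdesc_get_errors(rxq, i);
 *	len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
 */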
   8272 
   8273 static inline bool
   8274 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8275     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8276 {
   8277 
   8278 	if (sc->sc_type == WM_T_82574)
   8279 		return (status & ext_bit) != 0;
   8280 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8281 		return (status & nq_bit) != 0;
   8282 	else
   8283 		return (status & legacy_bit) != 0;
   8284 }
   8285 
   8286 static inline bool
   8287 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8288     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8289 {
   8290 
   8291 	if (sc->sc_type == WM_T_82574)
   8292 		return (error & ext_bit) != 0;
   8293 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8294 		return (error & nq_bit) != 0;
   8295 	else
   8296 		return (error & legacy_bit) != 0;
   8297 }
   8298 
   8299 static inline bool
   8300 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8301 {
   8302 
   8303 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8304 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8305 		return true;
   8306 	else
   8307 		return false;
   8308 }
   8309 
   8310 static inline bool
   8311 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8312 {
   8313 	struct wm_softc *sc = rxq->rxq_sc;
   8314 
    8315 	/* XXX Missing error bit for newqueue? */
   8316 	if (wm_rxdesc_is_set_error(sc, errors,
   8317 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8318 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8319 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8320 		NQRXC_ERROR_RXE)) {
   8321 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8322 		    EXTRXC_ERROR_SE, 0))
   8323 			log(LOG_WARNING, "%s: symbol error\n",
   8324 			    device_xname(sc->sc_dev));
   8325 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8326 		    EXTRXC_ERROR_SEQ, 0))
   8327 			log(LOG_WARNING, "%s: receive sequence error\n",
   8328 			    device_xname(sc->sc_dev));
   8329 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8330 		    EXTRXC_ERROR_CE, 0))
   8331 			log(LOG_WARNING, "%s: CRC error\n",
   8332 			    device_xname(sc->sc_dev));
   8333 		return true;
   8334 	}
   8335 
   8336 	return false;
   8337 }
   8338 
   8339 static inline bool
   8340 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8341 {
   8342 	struct wm_softc *sc = rxq->rxq_sc;
   8343 
   8344 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8345 		NQRXC_STATUS_DD)) {
   8346 		/* We have processed all of the receive descriptors. */
   8347 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8348 		return false;
   8349 	}
   8350 
   8351 	return true;
   8352 }
   8353 
   8354 static inline bool
   8355 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8356     uint16_t vlantag, struct mbuf *m)
   8357 {
   8358 
   8359 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8360 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8361 		vlan_set_tag(m, le16toh(vlantag));
   8362 	}
   8363 
   8364 	return true;
   8365 }
   8366 
   8367 static inline void
   8368 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8369     uint32_t errors, struct mbuf *m)
   8370 {
   8371 	struct wm_softc *sc = rxq->rxq_sc;
   8372 
   8373 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8374 		if (wm_rxdesc_is_set_status(sc, status,
   8375 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8376 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8377 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8378 			if (wm_rxdesc_is_set_error(sc, errors,
   8379 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8380 				m->m_pkthdr.csum_flags |=
   8381 					M_CSUM_IPv4_BAD;
   8382 		}
   8383 		if (wm_rxdesc_is_set_status(sc, status,
   8384 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8385 			/*
   8386 			 * Note: we don't know if this was TCP or UDP,
   8387 			 * so we just set both bits, and expect the
   8388 			 * upper layers to deal.
   8389 			 */
   8390 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8391 			m->m_pkthdr.csum_flags |=
   8392 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8393 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8394 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8395 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8396 				m->m_pkthdr.csum_flags |=
   8397 					M_CSUM_TCP_UDP_BAD;
   8398 		}
   8399 	}
   8400 }
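
/*
 * In short, the mapping above: the descriptor status bits say which
 * checksums the hardware verified, the error bits say whether the
 * verification failed.  For the IPv4 header checksum (illustrative):
 *
 *	IPCS set, IPE clear  ->  M_CSUM_IPv4
 *	IPCS set, IPE set    ->  M_CSUM_IPv4 | M_CSUM_IPv4_BAD
 */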
   8401 
   8402 /*
   8403  * wm_rxeof:
   8404  *
   8405  *	Helper; handle receive interrupts.
   8406  */
   8407 static bool
   8408 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8409 {
   8410 	struct wm_softc *sc = rxq->rxq_sc;
   8411 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8412 	struct wm_rxsoft *rxs;
   8413 	struct mbuf *m;
   8414 	int i, len;
   8415 	int count = 0;
   8416 	uint32_t status, errors;
   8417 	uint16_t vlantag;
   8418 	bool more = false;
   8419 
   8420 	KASSERT(mutex_owned(rxq->rxq_lock));
   8421 
   8422 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8423 		if (limit-- == 0) {
   8424 			rxq->rxq_ptr = i;
   8425 			more = true;
   8426 			DPRINTF(WM_DEBUG_RX,
   8427 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8428 				device_xname(sc->sc_dev), i));
   8429 			break;
   8430 		}
   8431 
   8432 		rxs = &rxq->rxq_soft[i];
   8433 
   8434 		DPRINTF(WM_DEBUG_RX,
   8435 		    ("%s: RX: checking descriptor %d\n",
   8436 		    device_xname(sc->sc_dev), i));
   8437 		wm_cdrxsync(rxq, i,
   8438 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8439 
   8440 		status = wm_rxdesc_get_status(rxq, i);
   8441 		errors = wm_rxdesc_get_errors(rxq, i);
   8442 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8443 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8444 #ifdef WM_DEBUG
   8445 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8446 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8447 #endif
   8448 
   8449 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8450 			/*
    8451 			 * Update the receive pointer while holding rxq_lock,
    8452 			 * keeping it consistent with the incremented counter.
   8453 			 */
   8454 			rxq->rxq_ptr = i;
   8455 			break;
   8456 		}
   8457 
   8458 		count++;
   8459 		if (__predict_false(rxq->rxq_discard)) {
   8460 			DPRINTF(WM_DEBUG_RX,
   8461 			    ("%s: RX: discarding contents of descriptor %d\n",
   8462 			    device_xname(sc->sc_dev), i));
   8463 			wm_init_rxdesc(rxq, i);
   8464 			if (wm_rxdesc_is_eop(rxq, status)) {
   8465 				/* Reset our state. */
   8466 				DPRINTF(WM_DEBUG_RX,
   8467 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8468 				    device_xname(sc->sc_dev)));
   8469 				rxq->rxq_discard = 0;
   8470 			}
   8471 			continue;
   8472 		}
   8473 
   8474 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8475 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8476 
   8477 		m = rxs->rxs_mbuf;
   8478 
   8479 		/*
   8480 		 * Add a new receive buffer to the ring, unless of
   8481 		 * course the length is zero. Treat the latter as a
   8482 		 * failed mapping.
   8483 		 */
   8484 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8485 			/*
   8486 			 * Failed, throw away what we've done so
   8487 			 * far, and discard the rest of the packet.
   8488 			 */
   8489 			ifp->if_ierrors++;
   8490 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8491 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8492 			wm_init_rxdesc(rxq, i);
   8493 			if (!wm_rxdesc_is_eop(rxq, status))
   8494 				rxq->rxq_discard = 1;
   8495 			if (rxq->rxq_head != NULL)
   8496 				m_freem(rxq->rxq_head);
   8497 			WM_RXCHAIN_RESET(rxq);
   8498 			DPRINTF(WM_DEBUG_RX,
   8499 			    ("%s: RX: Rx buffer allocation failed, "
   8500 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8501 			    rxq->rxq_discard ? " (discard)" : ""));
   8502 			continue;
   8503 		}
   8504 
   8505 		m->m_len = len;
   8506 		rxq->rxq_len += len;
   8507 		DPRINTF(WM_DEBUG_RX,
   8508 		    ("%s: RX: buffer at %p len %d\n",
   8509 		    device_xname(sc->sc_dev), m->m_data, len));
   8510 
   8511 		/* If this is not the end of the packet, keep looking. */
   8512 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8513 			WM_RXCHAIN_LINK(rxq, m);
   8514 			DPRINTF(WM_DEBUG_RX,
   8515 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8516 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8517 			continue;
   8518 		}
   8519 
   8520 		/*
   8521 		 * Okay, we have the entire packet now.  The chip is
    8522 		 * configured to include the FCS except on I350, I354 and
    8523 		 * I21[01] (not all chips can be configured to strip it),
    8524 		 * so we need to trim it here.  We may need to adjust the
    8525 		 * length of the previous mbuf in the chain if the current
    8526 		 * mbuf is too short.
    8527 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8528 		 * register is always set on I350, so we don't trim the FCS.
   8529 		 */
   8530 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8531 		    && (sc->sc_type != WM_T_I210)
   8532 		    && (sc->sc_type != WM_T_I211)) {
   8533 			if (m->m_len < ETHER_CRC_LEN) {
   8534 				rxq->rxq_tail->m_len
   8535 				    -= (ETHER_CRC_LEN - m->m_len);
   8536 				m->m_len = 0;
   8537 			} else
   8538 				m->m_len -= ETHER_CRC_LEN;
   8539 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8540 		} else
   8541 			len = rxq->rxq_len;
   8542 
   8543 		WM_RXCHAIN_LINK(rxq, m);
   8544 
   8545 		*rxq->rxq_tailp = NULL;
   8546 		m = rxq->rxq_head;
   8547 
   8548 		WM_RXCHAIN_RESET(rxq);
   8549 
   8550 		DPRINTF(WM_DEBUG_RX,
   8551 		    ("%s: RX: have entire packet, len -> %d\n",
   8552 		    device_xname(sc->sc_dev), len));
   8553 
   8554 		/* If an error occurred, update stats and drop the packet. */
   8555 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8556 			m_freem(m);
   8557 			continue;
   8558 		}
   8559 
   8560 		/* No errors.  Receive the packet. */
   8561 		m_set_rcvif(m, ifp);
   8562 		m->m_pkthdr.len = len;
   8563 		/*
   8564 		 * TODO
    8565 		 * The rsshash and rsstype should be saved in this mbuf.
   8566 		 */
   8567 		DPRINTF(WM_DEBUG_RX,
   8568 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8569 			device_xname(sc->sc_dev), rsstype, rsshash));
   8570 
   8571 		/*
   8572 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8573 		 * for us.  Associate the tag with the packet.
   8574 		 */
   8575 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8576 			continue;
   8577 
   8578 		/* Set up checksum info for this packet. */
   8579 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8580 		/*
    8581 		 * Update the receive pointer while holding rxq_lock, keeping
    8582 		 * it consistent with the incremented counter.
   8583 		 */
   8584 		rxq->rxq_ptr = i;
   8585 		rxq->rxq_packets++;
   8586 		rxq->rxq_bytes += len;
   8587 		mutex_exit(rxq->rxq_lock);
   8588 
   8589 		/* Pass it on. */
   8590 		if_percpuq_enqueue(sc->sc_ipq, m);
   8591 
   8592 		mutex_enter(rxq->rxq_lock);
   8593 
   8594 		if (rxq->rxq_stopping)
   8595 			break;
   8596 	}
   8597 
   8598 	if (count != 0)
   8599 		rnd_add_uint32(&sc->rnd_source, count);
   8600 
   8601 	DPRINTF(WM_DEBUG_RX,
   8602 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8603 
   8604 	return more;
   8605 }
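
/*
 * Note on multi-buffer packets in wm_rxeof() above: a frame larger than
 * one receive buffer spans several descriptors, and only the last one has
 * EOP set.  The fragments are accumulated on rxq->rxq_head via
 * WM_RXCHAIN_LINK() and the completed chain is handed to
 * if_percpuq_enqueue() only once EOP is seen; rxq_lock is dropped around
 * that call, which merely schedules a softint.
 */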
   8606 
   8607 /*
   8608  * wm_linkintr_gmii:
   8609  *
   8610  *	Helper; handle link interrupts for GMII.
   8611  */
   8612 static void
   8613 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8614 {
   8615 
   8616 	KASSERT(WM_CORE_LOCKED(sc));
   8617 
   8618 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8619 		__func__));
   8620 
   8621 	if (icr & ICR_LSC) {
   8622 		uint32_t reg;
   8623 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8624 
   8625 		if ((status & STATUS_LU) != 0) {
   8626 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8627 				device_xname(sc->sc_dev),
   8628 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8629 		} else {
   8630 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8631 				device_xname(sc->sc_dev)));
   8632 		}
   8633 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8634 			wm_gig_downshift_workaround_ich8lan(sc);
   8635 
   8636 		if ((sc->sc_type == WM_T_ICH8)
   8637 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8638 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8639 		}
   8640 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8641 			device_xname(sc->sc_dev)));
   8642 		mii_pollstat(&sc->sc_mii);
   8643 		if (sc->sc_type == WM_T_82543) {
   8644 			int miistatus, active;
   8645 
   8646 			/*
   8647 			 * With 82543, we need to force speed and
   8648 			 * duplex on the MAC equal to what the PHY
   8649 			 * speed and duplex configuration is.
   8650 			 */
   8651 			miistatus = sc->sc_mii.mii_media_status;
   8652 
   8653 			if (miistatus & IFM_ACTIVE) {
   8654 				active = sc->sc_mii.mii_media_active;
   8655 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8656 				switch (IFM_SUBTYPE(active)) {
   8657 				case IFM_10_T:
   8658 					sc->sc_ctrl |= CTRL_SPEED_10;
   8659 					break;
   8660 				case IFM_100_TX:
   8661 					sc->sc_ctrl |= CTRL_SPEED_100;
   8662 					break;
   8663 				case IFM_1000_T:
   8664 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8665 					break;
   8666 				default:
   8667 					/*
    8668 					 * Fiber?
    8669 					 * Should never get here.
   8670 					 */
   8671 					printf("unknown media (%x)\n", active);
   8672 					break;
   8673 				}
   8674 				if (active & IFM_FDX)
   8675 					sc->sc_ctrl |= CTRL_FD;
   8676 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8677 			}
   8678 		} else if (sc->sc_type == WM_T_PCH) {
   8679 			wm_k1_gig_workaround_hv(sc,
   8680 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8681 		}
   8682 
   8683 		if ((sc->sc_phytype == WMPHY_82578)
   8684 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8685 			== IFM_1000_T)) {
   8686 
   8687 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8688 				delay(200*1000); /* XXX too big */
   8689 
   8690 				/* Link stall fix for link up */
   8691 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8692 				    HV_MUX_DATA_CTRL,
   8693 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8694 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8695 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8696 				    HV_MUX_DATA_CTRL,
   8697 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8698 			}
   8699 		}
   8700 		/*
   8701 		 * I217 Packet Loss issue:
   8702 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8703 		 * on power up.
   8704 		 * Set the Beacon Duration for I217 to 8 usec
   8705 		 */
   8706 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8707 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8708 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8709 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8710 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8711 		}
   8712 
   8713 		/* XXX Work-around I218 hang issue */
   8714 		/* e1000_k1_workaround_lpt_lp() */
   8715 
   8716 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8717 			/*
   8718 			 * Set platform power management values for Latency
   8719 			 * Tolerance Reporting (LTR)
   8720 			 */
   8721 			wm_platform_pm_pch_lpt(sc,
   8722 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8723 				    != 0));
   8724 		}
   8725 
   8726 		/* FEXTNVM6 K1-off workaround */
   8727 		if (sc->sc_type == WM_T_PCH_SPT) {
   8728 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8729 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8730 			    & FEXTNVM6_K1_OFF_ENABLE)
   8731 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8732 			else
   8733 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8734 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8735 		}
   8736 	} else if (icr & ICR_RXSEQ) {
   8737 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8738 			device_xname(sc->sc_dev)));
   8739 	}
   8740 }
   8741 
   8742 /*
   8743  * wm_linkintr_tbi:
   8744  *
   8745  *	Helper; handle link interrupts for TBI mode.
   8746  */
   8747 static void
   8748 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8749 {
   8750 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8751 	uint32_t status;
   8752 
   8753 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8754 		__func__));
   8755 
   8756 	status = CSR_READ(sc, WMREG_STATUS);
   8757 	if (icr & ICR_LSC) {
   8758 		if (status & STATUS_LU) {
   8759 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8760 			    device_xname(sc->sc_dev),
   8761 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8762 			/*
   8763 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8764 			 * so we should update sc->sc_ctrl
   8765 			 */
   8766 
   8767 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8768 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8769 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8770 			if (status & STATUS_FD)
   8771 				sc->sc_tctl |=
   8772 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8773 			else
   8774 				sc->sc_tctl |=
   8775 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8776 			if (sc->sc_ctrl & CTRL_TFCE)
   8777 				sc->sc_fcrtl |= FCRTL_XONE;
   8778 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8779 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8780 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8781 				      sc->sc_fcrtl);
   8782 			sc->sc_tbi_linkup = 1;
   8783 			if_link_state_change(ifp, LINK_STATE_UP);
   8784 		} else {
   8785 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8786 			    device_xname(sc->sc_dev)));
   8787 			sc->sc_tbi_linkup = 0;
   8788 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8789 		}
   8790 		/* Update LED */
   8791 		wm_tbi_serdes_set_linkled(sc);
   8792 	} else if (icr & ICR_RXSEQ) {
   8793 		DPRINTF(WM_DEBUG_LINK,
   8794 		    ("%s: LINK: Receive sequence error\n",
   8795 		    device_xname(sc->sc_dev)));
   8796 	}
   8797 }
   8798 
   8799 /*
   8800  * wm_linkintr_serdes:
   8801  *
    8802  *	Helper; handle link interrupts for SERDES mode.
   8803  */
   8804 static void
   8805 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8806 {
   8807 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8808 	struct mii_data *mii = &sc->sc_mii;
   8809 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8810 	uint32_t pcs_adv, pcs_lpab, reg;
   8811 
   8812 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8813 		__func__));
   8814 
   8815 	if (icr & ICR_LSC) {
   8816 		/* Check PCS */
   8817 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8818 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8819 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8820 				device_xname(sc->sc_dev)));
   8821 			mii->mii_media_status |= IFM_ACTIVE;
   8822 			sc->sc_tbi_linkup = 1;
   8823 			if_link_state_change(ifp, LINK_STATE_UP);
   8824 		} else {
   8825 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8826 				device_xname(sc->sc_dev)));
   8827 			mii->mii_media_status |= IFM_NONE;
   8828 			sc->sc_tbi_linkup = 0;
   8829 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8830 			wm_tbi_serdes_set_linkled(sc);
   8831 			return;
   8832 		}
   8833 		mii->mii_media_active |= IFM_1000_SX;
   8834 		if ((reg & PCS_LSTS_FDX) != 0)
   8835 			mii->mii_media_active |= IFM_FDX;
   8836 		else
   8837 			mii->mii_media_active |= IFM_HDX;
   8838 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8839 			/* Check flow */
   8840 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8841 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8842 				DPRINTF(WM_DEBUG_LINK,
   8843 				    ("XXX LINKOK but not ACOMP\n"));
   8844 				return;
   8845 			}
   8846 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8847 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8848 			DPRINTF(WM_DEBUG_LINK,
   8849 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8850 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8851 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8852 				mii->mii_media_active |= IFM_FLOW
   8853 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8854 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8855 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8856 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8857 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8858 				mii->mii_media_active |= IFM_FLOW
   8859 				    | IFM_ETH_TXPAUSE;
   8860 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8861 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8862 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8863 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8864 				mii->mii_media_active |= IFM_FLOW
   8865 				    | IFM_ETH_RXPAUSE;
   8866 		}
   8867 		/* Update LED */
   8868 		wm_tbi_serdes_set_linkled(sc);
   8869 	} else {
   8870 		DPRINTF(WM_DEBUG_LINK,
   8871 		    ("%s: LINK: Receive sequence error\n",
   8872 		    device_xname(sc->sc_dev)));
   8873 	}
   8874 }
   8875 
   8876 /*
   8877  * wm_linkintr:
   8878  *
   8879  *	Helper; handle link interrupts.
   8880  */
   8881 static void
   8882 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8883 {
   8884 
   8885 	KASSERT(WM_CORE_LOCKED(sc));
   8886 
   8887 	if (sc->sc_flags & WM_F_HAS_MII)
   8888 		wm_linkintr_gmii(sc, icr);
   8889 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8890 	    && (sc->sc_type >= WM_T_82575))
   8891 		wm_linkintr_serdes(sc, icr);
   8892 	else
   8893 		wm_linkintr_tbi(sc, icr);
   8894 }
   8895 
   8896 /*
   8897  * wm_intr_legacy:
   8898  *
   8899  *	Interrupt service routine for INTx and MSI.
   8900  */
   8901 static int
   8902 wm_intr_legacy(void *arg)
   8903 {
   8904 	struct wm_softc *sc = arg;
   8905 	struct wm_queue *wmq = &sc->sc_queue[0];
   8906 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8907 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8908 	uint32_t icr, rndval = 0;
   8909 	int handled = 0;
   8910 
   8911 	while (1 /* CONSTCOND */) {
   8912 		icr = CSR_READ(sc, WMREG_ICR);
   8913 		if ((icr & sc->sc_icr) == 0)
   8914 			break;
   8915 		if (handled == 0) {
   8916 			DPRINTF(WM_DEBUG_TX,
   8917 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8918 		}
   8919 		if (rndval == 0)
   8920 			rndval = icr;
   8921 
   8922 		mutex_enter(rxq->rxq_lock);
   8923 
   8924 		if (rxq->rxq_stopping) {
   8925 			mutex_exit(rxq->rxq_lock);
   8926 			break;
   8927 		}
   8928 
   8929 		handled = 1;
   8930 
   8931 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8932 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8933 			DPRINTF(WM_DEBUG_RX,
   8934 			    ("%s: RX: got Rx intr 0x%08x\n",
   8935 			    device_xname(sc->sc_dev),
   8936 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8937 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8938 		}
   8939 #endif
   8940 		/*
   8941 		 * wm_rxeof() does *not* call upper layer functions directly,
    8942 		 * as if_percpuq_enqueue() just calls softint_schedule().
   8943 		 * So, we can call wm_rxeof() in interrupt context.
   8944 		 */
   8945 		wm_rxeof(rxq, UINT_MAX);
   8946 
   8947 		mutex_exit(rxq->rxq_lock);
   8948 		mutex_enter(txq->txq_lock);
   8949 
   8950 		if (txq->txq_stopping) {
   8951 			mutex_exit(txq->txq_lock);
   8952 			break;
   8953 		}
   8954 
   8955 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8956 		if (icr & ICR_TXDW) {
   8957 			DPRINTF(WM_DEBUG_TX,
   8958 			    ("%s: TX: got TXDW interrupt\n",
   8959 			    device_xname(sc->sc_dev)));
   8960 			WM_Q_EVCNT_INCR(txq, txdw);
   8961 		}
   8962 #endif
   8963 		wm_txeof(txq, UINT_MAX);
   8964 
   8965 		mutex_exit(txq->txq_lock);
   8966 		WM_CORE_LOCK(sc);
   8967 
   8968 		if (sc->sc_core_stopping) {
   8969 			WM_CORE_UNLOCK(sc);
   8970 			break;
   8971 		}
   8972 
   8973 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8974 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8975 			wm_linkintr(sc, icr);
   8976 		}
   8977 
   8978 		WM_CORE_UNLOCK(sc);
   8979 
   8980 		if (icr & ICR_RXO) {
   8981 #if defined(WM_DEBUG)
   8982 			log(LOG_WARNING, "%s: Receive overrun\n",
   8983 			    device_xname(sc->sc_dev));
   8984 #endif /* defined(WM_DEBUG) */
   8985 		}
   8986 	}
   8987 
   8988 	rnd_add_uint32(&sc->rnd_source, rndval);
   8989 
   8990 	if (handled) {
   8991 		/* Try to get more packets going. */
   8992 		softint_schedule(wmq->wmq_si);
   8993 	}
   8994 
   8995 	return handled;
   8996 }
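
/*
 * Note on the loop above: on these controllers, reading WMREG_ICR clears
 * the asserted cause bits, so the handler keeps re-reading ICR and
 * servicing Rx, Tx and link work until no cause in sc->sc_icr remains.
 */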
   8997 
   8998 static inline void
   8999 wm_txrxintr_disable(struct wm_queue *wmq)
   9000 {
   9001 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9002 
   9003 	if (sc->sc_type == WM_T_82574)
   9004 		CSR_WRITE(sc, WMREG_IMC,
   9005 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9006 	else if (sc->sc_type == WM_T_82575)
   9007 		CSR_WRITE(sc, WMREG_EIMC,
   9008 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9009 	else
   9010 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9011 }
   9012 
   9013 static inline void
   9014 wm_txrxintr_enable(struct wm_queue *wmq)
   9015 {
   9016 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9017 
   9018 	wm_itrs_calculate(sc, wmq);
   9019 
   9020 	/*
    9021 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9022 	 * here.  It does not matter whether RXQ(0) or RXQ(1) enables
    9023 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9024 	 * while its wm_handle_queue(wmq) is running.
   9025 	 */
   9026 	if (sc->sc_type == WM_T_82574)
   9027 		CSR_WRITE(sc, WMREG_IMS,
   9028 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9029 	else if (sc->sc_type == WM_T_82575)
   9030 		CSR_WRITE(sc, WMREG_EIMS,
   9031 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9032 	else
   9033 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9034 }
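
/*
 * The per-queue interrupt mask layout differs by device, as the two
 * helpers above show (illustrative summary):
 *
 *	82574:	IMS/IMC with ICR_TXQ(n) and ICR_RXQ(n) bits
 *	82575:	EIMS/EIMC with EITR_TX_QUEUE(n) and EITR_RX_QUEUE(n) bits
 *	others:	EIMS/EIMC with one bit per MSI-X vector index
 */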
   9035 
   9036 static int
   9037 wm_txrxintr_msix(void *arg)
   9038 {
   9039 	struct wm_queue *wmq = arg;
   9040 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9041 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9042 	struct wm_softc *sc = txq->txq_sc;
   9043 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9044 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9045 	bool txmore;
   9046 	bool rxmore;
   9047 
   9048 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9049 
   9050 	DPRINTF(WM_DEBUG_TX,
   9051 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9052 
   9053 	wm_txrxintr_disable(wmq);
   9054 
   9055 	mutex_enter(txq->txq_lock);
   9056 
   9057 	if (txq->txq_stopping) {
   9058 		mutex_exit(txq->txq_lock);
   9059 		return 0;
   9060 	}
   9061 
   9062 	WM_Q_EVCNT_INCR(txq, txdw);
   9063 	txmore = wm_txeof(txq, txlimit);
    9064 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   9065 	mutex_exit(txq->txq_lock);
   9066 
   9067 	DPRINTF(WM_DEBUG_RX,
   9068 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9069 	mutex_enter(rxq->rxq_lock);
   9070 
   9071 	if (rxq->rxq_stopping) {
   9072 		mutex_exit(rxq->rxq_lock);
   9073 		return 0;
   9074 	}
   9075 
   9076 	WM_Q_EVCNT_INCR(rxq, rxintr);
   9077 	rxmore = wm_rxeof(rxq, rxlimit);
   9078 	mutex_exit(rxq->rxq_lock);
   9079 
   9080 	wm_itrs_writereg(sc, wmq);
   9081 
   9082 	if (txmore || rxmore)
   9083 		softint_schedule(wmq->wmq_si);
   9084 	else
   9085 		wm_txrxintr_enable(wmq);
   9086 
   9087 	return 1;
   9088 }
   9089 
   9090 static void
   9091 wm_handle_queue(void *arg)
   9092 {
   9093 	struct wm_queue *wmq = arg;
   9094 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9095 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9096 	struct wm_softc *sc = txq->txq_sc;
   9097 	u_int txlimit = sc->sc_tx_process_limit;
   9098 	u_int rxlimit = sc->sc_rx_process_limit;
   9099 	bool txmore;
   9100 	bool rxmore;
   9101 
   9102 	mutex_enter(txq->txq_lock);
   9103 	if (txq->txq_stopping) {
   9104 		mutex_exit(txq->txq_lock);
   9105 		return;
   9106 	}
   9107 	txmore = wm_txeof(txq, txlimit);
   9108 	wm_deferred_start_locked(txq);
   9109 	mutex_exit(txq->txq_lock);
   9110 
   9111 	mutex_enter(rxq->rxq_lock);
   9112 	if (rxq->rxq_stopping) {
   9113 		mutex_exit(rxq->rxq_lock);
   9114 		return;
   9115 	}
   9116 	WM_Q_EVCNT_INCR(rxq, rxdefer);
   9117 	rxmore = wm_rxeof(rxq, rxlimit);
   9118 	mutex_exit(rxq->rxq_lock);
   9119 
   9120 	if (txmore || rxmore)
   9121 		softint_schedule(wmq->wmq_si);
   9122 	else
   9123 		wm_txrxintr_enable(wmq);
   9124 }
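
/*
 * Together, wm_txrxintr_msix() and wm_handle_queue() form a simple
 * mitigation scheme: the hard interrupt processes at most
 * sc_{tx,rx}_intr_process_limit entries with the queue interrupt masked;
 * if more work remains it defers to this softint (with its own
 * sc_{tx,rx}_process_limit) rather than re-enabling the interrupt, and
 * the interrupt is only unmasked once both rings have been drained.
 */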
   9125 
   9126 /*
   9127  * wm_linkintr_msix:
   9128  *
   9129  *	Interrupt service routine for link status change for MSI-X.
   9130  */
   9131 static int
   9132 wm_linkintr_msix(void *arg)
   9133 {
   9134 	struct wm_softc *sc = arg;
   9135 	uint32_t reg;
   9136 	bool has_rxo;
   9137 
   9138 	DPRINTF(WM_DEBUG_LINK,
   9139 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9140 
   9141 	reg = CSR_READ(sc, WMREG_ICR);
   9142 	WM_CORE_LOCK(sc);
   9143 	if (sc->sc_core_stopping)
   9144 		goto out;
   9145 
    9146 	if ((reg & ICR_LSC) != 0) {
   9147 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9148 		wm_linkintr(sc, ICR_LSC);
   9149 	}
   9150 
   9151 	/*
   9152 	 * XXX 82574 MSI-X mode workaround
   9153 	 *
    9154 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises an interrupt
    9155 	 * on the ICR_OTHER MSI-X vector, and raises neither the ICR_RXQ(0)
    9156 	 * nor the ICR_RXQ(1) vector.  So we trigger ICR_RXQ(0) and
    9157 	 * ICR_RXQ(1) interrupts by writing WMREG_ICS to process Rx packets.
   9158 	 */
   9159 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9160 #if defined(WM_DEBUG)
   9161 		log(LOG_WARNING, "%s: Receive overrun\n",
   9162 		    device_xname(sc->sc_dev));
   9163 #endif /* defined(WM_DEBUG) */
   9164 
   9165 		has_rxo = true;
   9166 		/*
   9167 		 * The RXO interrupt is very high rate when receive traffic is
   9168 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   9169 		 * interrupts. ICR_OTHER will be enabled at the end of
   9170 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9171 		 * ICR_RXQ(1) interrupts.
   9172 		 */
   9173 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9174 
   9175 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9176 	}
   9177 
   9180 out:
   9181 	WM_CORE_UNLOCK(sc);
   9182 
   9183 	if (sc->sc_type == WM_T_82574) {
   9184 		if (!has_rxo)
   9185 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9186 		else
   9187 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9188 	} else if (sc->sc_type == WM_T_82575)
   9189 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9190 	else
   9191 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9192 
   9193 	return 1;
   9194 }
   9195 
   9196 /*
   9197  * Media related.
   9198  * GMII, SGMII, TBI (and SERDES)
   9199  */
   9200 
   9201 /* Common */
   9202 
   9203 /*
   9204  * wm_tbi_serdes_set_linkled:
   9205  *
   9206  *	Update the link LED on TBI and SERDES devices.
   9207  */
   9208 static void
   9209 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9210 {
   9211 
   9212 	if (sc->sc_tbi_linkup)
   9213 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9214 	else
   9215 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9216 
   9217 	/* 82540 or newer devices are active low */
   9218 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9219 
   9220 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9221 }
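
/*
 * The XOR above inverts the SWDPIN(0) sense on 82540 and newer parts,
 * where the LED is active low.  Illustrative truth table:
 *
 *	linkup	type >= 82540	CTRL_SWDPIN(0)
 *	yes	no		1
 *	yes	yes		0
 *	no	no		0
 *	no	yes		1
 */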
   9222 
   9223 /* GMII related */
   9224 
   9225 /*
   9226  * wm_gmii_reset:
   9227  *
   9228  *	Reset the PHY.
   9229  */
   9230 static void
   9231 wm_gmii_reset(struct wm_softc *sc)
   9232 {
   9233 	uint32_t reg;
   9234 	int rv;
   9235 
   9236 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9237 		device_xname(sc->sc_dev), __func__));
   9238 
   9239 	rv = sc->phy.acquire(sc);
   9240 	if (rv != 0) {
   9241 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9242 		    __func__);
   9243 		return;
   9244 	}
   9245 
   9246 	switch (sc->sc_type) {
   9247 	case WM_T_82542_2_0:
   9248 	case WM_T_82542_2_1:
   9249 		/* null */
   9250 		break;
   9251 	case WM_T_82543:
   9252 		/*
   9253 		 * With 82543, we need to force speed and duplex on the MAC
   9254 		 * equal to what the PHY speed and duplex configuration is.
   9255 		 * In addition, we need to perform a hardware reset on the PHY
   9256 		 * to take it out of reset.
   9257 		 */
   9258 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9259 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9260 
   9261 		/* The PHY reset pin is active-low. */
   9262 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9263 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9264 		    CTRL_EXT_SWDPIN(4));
   9265 		reg |= CTRL_EXT_SWDPIO(4);
   9266 
   9267 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9268 		CSR_WRITE_FLUSH(sc);
   9269 		delay(10*1000);
   9270 
   9271 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9272 		CSR_WRITE_FLUSH(sc);
   9273 		delay(150);
   9274 #if 0
   9275 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9276 #endif
   9277 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9278 		break;
   9279 	case WM_T_82544:	/* reset 10000us */
   9280 	case WM_T_82540:
   9281 	case WM_T_82545:
   9282 	case WM_T_82545_3:
   9283 	case WM_T_82546:
   9284 	case WM_T_82546_3:
   9285 	case WM_T_82541:
   9286 	case WM_T_82541_2:
   9287 	case WM_T_82547:
   9288 	case WM_T_82547_2:
   9289 	case WM_T_82571:	/* reset 100us */
   9290 	case WM_T_82572:
   9291 	case WM_T_82573:
   9292 	case WM_T_82574:
   9293 	case WM_T_82575:
   9294 	case WM_T_82576:
   9295 	case WM_T_82580:
   9296 	case WM_T_I350:
   9297 	case WM_T_I354:
   9298 	case WM_T_I210:
   9299 	case WM_T_I211:
   9300 	case WM_T_82583:
   9301 	case WM_T_80003:
   9302 		/* generic reset */
   9303 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9304 		CSR_WRITE_FLUSH(sc);
   9305 		delay(20000);
   9306 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9307 		CSR_WRITE_FLUSH(sc);
   9308 		delay(20000);
   9309 
   9310 		if ((sc->sc_type == WM_T_82541)
   9311 		    || (sc->sc_type == WM_T_82541_2)
   9312 		    || (sc->sc_type == WM_T_82547)
   9313 		    || (sc->sc_type == WM_T_82547_2)) {
    9314 			/* Workarounds for IGP are done in igp_reset() */
   9315 			/* XXX add code to set LED after phy reset */
   9316 		}
   9317 		break;
   9318 	case WM_T_ICH8:
   9319 	case WM_T_ICH9:
   9320 	case WM_T_ICH10:
   9321 	case WM_T_PCH:
   9322 	case WM_T_PCH2:
   9323 	case WM_T_PCH_LPT:
   9324 	case WM_T_PCH_SPT:
   9325 	case WM_T_PCH_CNP:
   9326 		/* generic reset */
   9327 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9328 		CSR_WRITE_FLUSH(sc);
   9329 		delay(100);
   9330 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9331 		CSR_WRITE_FLUSH(sc);
   9332 		delay(150);
   9333 		break;
   9334 	default:
   9335 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9336 		    __func__);
   9337 		break;
   9338 	}
   9339 
   9340 	sc->phy.release(sc);
   9341 
   9342 	/* get_cfg_done */
   9343 	wm_get_cfg_done(sc);
   9344 
   9345 	/* extra setup */
   9346 	switch (sc->sc_type) {
   9347 	case WM_T_82542_2_0:
   9348 	case WM_T_82542_2_1:
   9349 	case WM_T_82543:
   9350 	case WM_T_82544:
   9351 	case WM_T_82540:
   9352 	case WM_T_82545:
   9353 	case WM_T_82545_3:
   9354 	case WM_T_82546:
   9355 	case WM_T_82546_3:
   9356 	case WM_T_82541_2:
   9357 	case WM_T_82547_2:
   9358 	case WM_T_82571:
   9359 	case WM_T_82572:
   9360 	case WM_T_82573:
   9361 	case WM_T_82574:
   9362 	case WM_T_82583:
   9363 	case WM_T_82575:
   9364 	case WM_T_82576:
   9365 	case WM_T_82580:
   9366 	case WM_T_I350:
   9367 	case WM_T_I354:
   9368 	case WM_T_I210:
   9369 	case WM_T_I211:
   9370 	case WM_T_80003:
   9371 		/* null */
   9372 		break;
   9373 	case WM_T_82541:
   9374 	case WM_T_82547:
   9375 		/* XXX Configure actively LED after PHY reset */
   9376 		break;
   9377 	case WM_T_ICH8:
   9378 	case WM_T_ICH9:
   9379 	case WM_T_ICH10:
   9380 	case WM_T_PCH:
   9381 	case WM_T_PCH2:
   9382 	case WM_T_PCH_LPT:
   9383 	case WM_T_PCH_SPT:
   9384 	case WM_T_PCH_CNP:
   9385 		wm_phy_post_reset(sc);
   9386 		break;
   9387 	default:
   9388 		panic("%s: unknown type\n", __func__);
   9389 		break;
   9390 	}
   9391 }
   9392 
   9393 /*
   9394  * Setup sc_phytype and mii_{read|write}reg.
   9395  *
    9396  *  To identify the PHY type, the correct read/write function must be
    9397  * selected, and to select that function we can use only the PCI ID or
    9398  * the MAC type, without accessing any PHY registers.
    9399  *
    9400  *  On the first call of this function, the PHY ID is not known yet, so
    9401  * check the PCI ID or the MAC type. The list of PCI IDs may not be
    9402  * perfect, so the result might be incorrect.
    9403  *
    9404  *  On the second call, the PHY OUI and model are used to identify the
    9405  * PHY type. This might still not be perfect because of missing table
    9406  * entries, but it is more reliable than the first call.
    9407  *
    9408  *  If the newly detected result differs from the previous assumption,
    9409  * a diagnostic message is printed.
   9410  */
   9411 static void
   9412 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9413     uint16_t phy_model)
   9414 {
   9415 	device_t dev = sc->sc_dev;
   9416 	struct mii_data *mii = &sc->sc_mii;
   9417 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9418 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9419 	mii_readreg_t new_readreg;
   9420 	mii_writereg_t new_writereg;
   9421 
   9422 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9423 		device_xname(sc->sc_dev), __func__));
   9424 
   9425 	if (mii->mii_readreg == NULL) {
   9426 		/*
   9427 		 *  This is the first call of this function. For ICH and PCH
   9428 		 * variants, it's difficult to determine the PHY access method
   9429 		 * by sc_type, so use the PCI product ID for some devices.
   9430 		 */
   9431 
   9432 		switch (sc->sc_pcidevid) {
   9433 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9434 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9435 			/* 82577 */
   9436 			new_phytype = WMPHY_82577;
   9437 			break;
   9438 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9439 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9440 			/* 82578 */
   9441 			new_phytype = WMPHY_82578;
   9442 			break;
   9443 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9444 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9445 			/* 82579 */
   9446 			new_phytype = WMPHY_82579;
   9447 			break;
   9448 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9449 		case PCI_PRODUCT_INTEL_82801I_BM:
   9450 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9451 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9452 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9453 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9454 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9455 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9456 			/* ICH8, 9, 10 with 82567 */
   9457 			new_phytype = WMPHY_BM;
   9458 			break;
   9459 		default:
   9460 			break;
   9461 		}
   9462 	} else {
   9463 		/* It's not the first call. Use PHY OUI and model */
   9464 		switch (phy_oui) {
   9465 		case MII_OUI_ATHEROS: /* XXX ??? */
   9466 			switch (phy_model) {
   9467 			case 0x0004: /* XXX */
   9468 				new_phytype = WMPHY_82578;
   9469 				break;
   9470 			default:
   9471 				break;
   9472 			}
   9473 			break;
   9474 		case MII_OUI_xxMARVELL:
   9475 			switch (phy_model) {
   9476 			case MII_MODEL_xxMARVELL_I210:
   9477 				new_phytype = WMPHY_I210;
   9478 				break;
   9479 			case MII_MODEL_xxMARVELL_E1011:
   9480 			case MII_MODEL_xxMARVELL_E1000_3:
   9481 			case MII_MODEL_xxMARVELL_E1000_5:
   9482 			case MII_MODEL_xxMARVELL_E1112:
   9483 				new_phytype = WMPHY_M88;
   9484 				break;
   9485 			case MII_MODEL_xxMARVELL_E1149:
   9486 				new_phytype = WMPHY_BM;
   9487 				break;
   9488 			case MII_MODEL_xxMARVELL_E1111:
   9489 			case MII_MODEL_xxMARVELL_I347:
   9490 			case MII_MODEL_xxMARVELL_E1512:
   9491 			case MII_MODEL_xxMARVELL_E1340M:
   9492 			case MII_MODEL_xxMARVELL_E1543:
   9493 				new_phytype = WMPHY_M88;
   9494 				break;
   9495 			case MII_MODEL_xxMARVELL_I82563:
   9496 				new_phytype = WMPHY_GG82563;
   9497 				break;
   9498 			default:
   9499 				break;
   9500 			}
   9501 			break;
   9502 		case MII_OUI_INTEL:
   9503 			switch (phy_model) {
   9504 			case MII_MODEL_INTEL_I82577:
   9505 				new_phytype = WMPHY_82577;
   9506 				break;
   9507 			case MII_MODEL_INTEL_I82579:
   9508 				new_phytype = WMPHY_82579;
   9509 				break;
   9510 			case MII_MODEL_INTEL_I217:
   9511 				new_phytype = WMPHY_I217;
   9512 				break;
   9513 			case MII_MODEL_INTEL_I82580:
   9514 			case MII_MODEL_INTEL_I350:
   9515 				new_phytype = WMPHY_82580;
   9516 				break;
   9517 			default:
   9518 				break;
   9519 			}
   9520 			break;
   9521 		case MII_OUI_yyINTEL:
   9522 			switch (phy_model) {
   9523 			case MII_MODEL_yyINTEL_I82562G:
   9524 			case MII_MODEL_yyINTEL_I82562EM:
   9525 			case MII_MODEL_yyINTEL_I82562ET:
   9526 				new_phytype = WMPHY_IFE;
   9527 				break;
   9528 			case MII_MODEL_yyINTEL_IGP01E1000:
   9529 				new_phytype = WMPHY_IGP;
   9530 				break;
   9531 			case MII_MODEL_yyINTEL_I82566:
   9532 				new_phytype = WMPHY_IGP_3;
   9533 				break;
   9534 			default:
   9535 				break;
   9536 			}
   9537 			break;
   9538 		default:
   9539 			break;
   9540 		}
   9541 		if (new_phytype == WMPHY_UNKNOWN)
   9542 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9543 			    __func__);
   9544 
   9545 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9546 		    && (sc->sc_phytype != new_phytype)) {
    9547 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9548 			    "was incorrect. PHY type from PHY ID = %u\n",
   9549 			    sc->sc_phytype, new_phytype);
   9550 		}
   9551 	}
   9552 
   9553 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9554 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9555 		/* SGMII */
   9556 		new_readreg = wm_sgmii_readreg;
   9557 		new_writereg = wm_sgmii_writereg;
    9558 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9559 		/* BM2 (phyaddr == 1) */
   9560 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9561 		    && (new_phytype != WMPHY_BM)
   9562 		    && (new_phytype != WMPHY_UNKNOWN))
   9563 			doubt_phytype = new_phytype;
   9564 		new_phytype = WMPHY_BM;
   9565 		new_readreg = wm_gmii_bm_readreg;
   9566 		new_writereg = wm_gmii_bm_writereg;
   9567 	} else if (sc->sc_type >= WM_T_PCH) {
   9568 		/* All PCH* use _hv_ */
   9569 		new_readreg = wm_gmii_hv_readreg;
   9570 		new_writereg = wm_gmii_hv_writereg;
   9571 	} else if (sc->sc_type >= WM_T_ICH8) {
   9572 		/* non-82567 ICH8, 9 and 10 */
   9573 		new_readreg = wm_gmii_i82544_readreg;
   9574 		new_writereg = wm_gmii_i82544_writereg;
   9575 	} else if (sc->sc_type >= WM_T_80003) {
   9576 		/* 80003 */
   9577 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9578 		    && (new_phytype != WMPHY_GG82563)
   9579 		    && (new_phytype != WMPHY_UNKNOWN))
   9580 			doubt_phytype = new_phytype;
   9581 		new_phytype = WMPHY_GG82563;
   9582 		new_readreg = wm_gmii_i80003_readreg;
   9583 		new_writereg = wm_gmii_i80003_writereg;
   9584 	} else if (sc->sc_type >= WM_T_I210) {
   9585 		/* I210 and I211 */
   9586 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9587 		    && (new_phytype != WMPHY_I210)
   9588 		    && (new_phytype != WMPHY_UNKNOWN))
   9589 			doubt_phytype = new_phytype;
   9590 		new_phytype = WMPHY_I210;
   9591 		new_readreg = wm_gmii_gs40g_readreg;
   9592 		new_writereg = wm_gmii_gs40g_writereg;
   9593 	} else if (sc->sc_type >= WM_T_82580) {
   9594 		/* 82580, I350 and I354 */
   9595 		new_readreg = wm_gmii_82580_readreg;
   9596 		new_writereg = wm_gmii_82580_writereg;
   9597 	} else if (sc->sc_type >= WM_T_82544) {
    9598 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9599 		new_readreg = wm_gmii_i82544_readreg;
   9600 		new_writereg = wm_gmii_i82544_writereg;
   9601 	} else {
   9602 		new_readreg = wm_gmii_i82543_readreg;
   9603 		new_writereg = wm_gmii_i82543_writereg;
   9604 	}
   9605 
   9606 	if (new_phytype == WMPHY_BM) {
   9607 		/* All BM use _bm_ */
   9608 		new_readreg = wm_gmii_bm_readreg;
   9609 		new_writereg = wm_gmii_bm_writereg;
   9610 	}
   9611 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9612 		/* All PCH* use _hv_ */
   9613 		new_readreg = wm_gmii_hv_readreg;
   9614 		new_writereg = wm_gmii_hv_writereg;
   9615 	}
   9616 
   9617 	/* Diag output */
   9618 	if (doubt_phytype != WMPHY_UNKNOWN)
   9619 		aprint_error_dev(dev, "Assumed new PHY type was "
   9620 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9621 		    new_phytype);
   9622 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9623 	    && (sc->sc_phytype != new_phytype))
    9624 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9625 		    "was incorrect. New PHY type = %u\n",
   9626 		    sc->sc_phytype, new_phytype);
   9627 
   9628 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9629 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9630 
   9631 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9632 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9633 		    "function was incorrect.\n");
   9634 
   9635 	/* Update now */
   9636 	sc->sc_phytype = new_phytype;
   9637 	mii->mii_readreg = new_readreg;
   9638 	mii->mii_writereg = new_writereg;
   9639 }
   9640 
   9641 /*
   9642  * wm_get_phy_id_82575:
   9643  *
   9644  * Return PHY ID. Return -1 if it failed.
   9645  */
   9646 static int
   9647 wm_get_phy_id_82575(struct wm_softc *sc)
   9648 {
   9649 	uint32_t reg;
   9650 	int phyid = -1;
   9651 
   9652 	/* XXX */
   9653 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9654 		return -1;
   9655 
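	/*
	 * When SGMII traffic goes over MDIO, the PHY address can be read
	 * back from the MDIC register (82575/82576) or the MDICNFG
	 * register (82580 and newer).
	 */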
   9656 	if (wm_sgmii_uses_mdio(sc)) {
   9657 		switch (sc->sc_type) {
   9658 		case WM_T_82575:
   9659 		case WM_T_82576:
   9660 			reg = CSR_READ(sc, WMREG_MDIC);
   9661 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9662 			break;
   9663 		case WM_T_82580:
   9664 		case WM_T_I350:
   9665 		case WM_T_I354:
   9666 		case WM_T_I210:
   9667 		case WM_T_I211:
   9668 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9669 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9670 			break;
   9671 		default:
   9672 			return -1;
   9673 		}
   9674 	}
   9675 
   9676 	return phyid;
   9677 }
   9678 
   9679 
   9680 /*
   9681  * wm_gmii_mediainit:
   9682  *
   9683  *	Initialize media for use on 1000BASE-T devices.
   9684  */
   9685 static void
   9686 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9687 {
   9688 	device_t dev = sc->sc_dev;
   9689 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9690 	struct mii_data *mii = &sc->sc_mii;
   9691 	uint32_t reg;
   9692 
   9693 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9694 		device_xname(sc->sc_dev), __func__));
   9695 
   9696 	/* We have GMII. */
   9697 	sc->sc_flags |= WM_F_HAS_MII;
   9698 
   9699 	if (sc->sc_type == WM_T_80003)
    9700 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9701 	else
   9702 		sc->sc_tipg = TIPG_1000T_DFLT;
   9703 
   9704 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9705 	if ((sc->sc_type == WM_T_82580)
   9706 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9707 	    || (sc->sc_type == WM_T_I211)) {
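		/* Clear "Go Link Disconnect" so the PHY may bring the link up. */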
   9708 		reg = CSR_READ(sc, WMREG_PHPM);
   9709 		reg &= ~PHPM_GO_LINK_D;
   9710 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9711 	}
   9712 
   9713 	/*
   9714 	 * Let the chip set speed/duplex on its own based on
   9715 	 * signals from the PHY.
   9716 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9717 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9718 	 */
   9719 	sc->sc_ctrl |= CTRL_SLU;
   9720 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9721 
   9722 	/* Initialize our media structures and probe the GMII. */
   9723 	mii->mii_ifp = ifp;
   9724 
   9725 	mii->mii_statchg = wm_gmii_statchg;
   9726 
    9727 	/* Switch PHY control from SMBus to PCIe */
   9728 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9729 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9730 	    || (sc->sc_type == WM_T_PCH_CNP))
   9731 		wm_smbustopci(sc);
   9732 
   9733 	wm_gmii_reset(sc);
   9734 
   9735 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9736 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9737 	    wm_gmii_mediastatus);
   9738 
   9739 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9740 	    || (sc->sc_type == WM_T_82580)
   9741 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9742 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9743 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9744 			/* Attach only one port */
   9745 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9746 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9747 		} else {
   9748 			int i, id;
   9749 			uint32_t ctrl_ext;
   9750 
   9751 			id = wm_get_phy_id_82575(sc);
   9752 			if (id != -1) {
   9753 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9754 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9755 			}
   9756 			if ((id == -1)
   9757 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9758 				/* Power on the SGMII PHY if it is disabled */
   9759 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9760 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9761 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9762 				CSR_WRITE_FLUSH(sc);
   9763 				delay(300*1000); /* XXX too long */
   9764 
    9765 				/* Try PHY addresses 1 through 7 */
   9766 				for (i = 1; i < 8; i++)
   9767 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9768 					    0xffffffff, i, MII_OFFSET_ANY,
   9769 					    MIIF_DOPAUSE);
   9770 
    9771 				/* Restore the previous SFP cage power state */
   9772 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9773 			}
   9774 		}
   9775 	} else {
   9776 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9777 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9778 	}
   9779 
   9780 	/*
   9781 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9782 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9783 	 */
   9784 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9785 		|| (sc->sc_type == WM_T_PCH_SPT)
   9786 		|| (sc->sc_type == WM_T_PCH_CNP))
   9787 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9788 		wm_set_mdio_slow_mode_hv(sc);
   9789 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9790 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9791 	}
   9792 
   9793 	/*
   9794 	 * (For ICH8 variants)
   9795 	 * If PHY detection failed, use BM's r/w function and retry.
   9796 	 */
   9797 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9798 		/* if failed, retry with *_bm_* */
   9799 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9800 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9801 		    sc->sc_phytype);
   9802 		sc->sc_phytype = WMPHY_BM;
   9803 		mii->mii_readreg = wm_gmii_bm_readreg;
   9804 		mii->mii_writereg = wm_gmii_bm_writereg;
   9805 
   9806 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9807 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9808 	}
   9809 
   9810 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9811 		/* No PHY was found */
   9812 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9813 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9814 		sc->sc_phytype = WMPHY_NONE;
   9815 	} else {
   9816 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9817 
   9818 		/*
    9819 		 * A PHY was found. Check the PHY type again with a second
    9820 		 * call of wm_gmii_setup_phytype.
   9821 		 */
   9822 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9823 		    child->mii_mpd_model);
   9824 
   9825 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9826 	}
   9827 }
   9828 
   9829 /*
   9830  * wm_gmii_mediachange:	[ifmedia interface function]
   9831  *
   9832  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9833  */
   9834 static int
   9835 wm_gmii_mediachange(struct ifnet *ifp)
   9836 {
   9837 	struct wm_softc *sc = ifp->if_softc;
   9838 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9839 	int rc;
   9840 
   9841 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9842 		device_xname(sc->sc_dev), __func__));
   9843 	if ((ifp->if_flags & IFF_UP) == 0)
   9844 		return 0;
   9845 
   9846 	/* Disable D0 LPLU. */
   9847 	wm_lplu_d0_disable(sc);
   9848 
   9849 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9850 	sc->sc_ctrl |= CTRL_SLU;
   9851 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9852 	    || (sc->sc_type > WM_T_82543)) {
   9853 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9854 	} else {
   9855 		sc->sc_ctrl &= ~CTRL_ASDE;
   9856 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9857 		if (ife->ifm_media & IFM_FDX)
   9858 			sc->sc_ctrl |= CTRL_FD;
   9859 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9860 		case IFM_10_T:
   9861 			sc->sc_ctrl |= CTRL_SPEED_10;
   9862 			break;
   9863 		case IFM_100_TX:
   9864 			sc->sc_ctrl |= CTRL_SPEED_100;
   9865 			break;
   9866 		case IFM_1000_T:
   9867 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9868 			break;
   9869 		default:
   9870 			panic("wm_gmii_mediachange: bad media 0x%x",
   9871 			    ife->ifm_media);
   9872 		}
   9873 	}
   9874 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9875 	CSR_WRITE_FLUSH(sc);
   9876 	if (sc->sc_type <= WM_T_82543)
   9877 		wm_gmii_reset(sc);
   9878 
   9879 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9880 		return 0;
   9881 	return rc;
   9882 }
   9883 
   9884 /*
   9885  * wm_gmii_mediastatus:	[ifmedia interface function]
   9886  *
   9887  *	Get the current interface media status on a 1000BASE-T device.
   9888  */
   9889 static void
   9890 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9891 {
   9892 	struct wm_softc *sc = ifp->if_softc;
   9893 
   9894 	ether_mediastatus(ifp, ifmr);
   9895 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9896 	    | sc->sc_flowflags;
   9897 }
   9898 
   9899 #define	MDI_IO		CTRL_SWDPIN(2)
   9900 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9901 #define	MDI_CLK		CTRL_SWDPIN(3)
   9902 
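/*
 * MDIO bit-bang helpers for the i82543, where PHY access is driven in
 * software through the CTRL register's software-definable pins, with
 * each clock phase held for 10us.  wm_i82543_mii_sendbits() shifts a
 * value out MSB first; wm_i82543_mii_recvbits() clocks a 16-bit result
 * back in.
 */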
   9903 static void
   9904 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9905 {
   9906 	uint32_t i, v;
   9907 
   9908 	v = CSR_READ(sc, WMREG_CTRL);
   9909 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9910 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9911 
   9912 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9913 		if (data & i)
   9914 			v |= MDI_IO;
   9915 		else
   9916 			v &= ~MDI_IO;
   9917 		CSR_WRITE(sc, WMREG_CTRL, v);
   9918 		CSR_WRITE_FLUSH(sc);
   9919 		delay(10);
   9920 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9921 		CSR_WRITE_FLUSH(sc);
   9922 		delay(10);
   9923 		CSR_WRITE(sc, WMREG_CTRL, v);
   9924 		CSR_WRITE_FLUSH(sc);
   9925 		delay(10);
   9926 	}
   9927 }
   9928 
   9929 static uint32_t
   9930 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9931 {
   9932 	uint32_t v, i, data = 0;
   9933 
   9934 	v = CSR_READ(sc, WMREG_CTRL);
   9935 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9936 	v |= CTRL_SWDPIO(3);
   9937 
   9938 	CSR_WRITE(sc, WMREG_CTRL, v);
   9939 	CSR_WRITE_FLUSH(sc);
   9940 	delay(10);
   9941 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9942 	CSR_WRITE_FLUSH(sc);
   9943 	delay(10);
   9944 	CSR_WRITE(sc, WMREG_CTRL, v);
   9945 	CSR_WRITE_FLUSH(sc);
   9946 	delay(10);
   9947 
   9948 	for (i = 0; i < 16; i++) {
   9949 		data <<= 1;
   9950 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9951 		CSR_WRITE_FLUSH(sc);
   9952 		delay(10);
   9953 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9954 			data |= 1;
   9955 		CSR_WRITE(sc, WMREG_CTRL, v);
   9956 		CSR_WRITE_FLUSH(sc);
   9957 		delay(10);
   9958 	}
   9959 
   9960 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9961 	CSR_WRITE_FLUSH(sc);
   9962 	delay(10);
   9963 	CSR_WRITE(sc, WMREG_CTRL, v);
   9964 	CSR_WRITE_FLUSH(sc);
   9965 	delay(10);
   9966 
   9967 	return data;
   9968 }
   9969 
   9970 #undef MDI_IO
   9971 #undef MDI_DIR
   9972 #undef MDI_CLK
   9973 
   9974 /*
   9975  * wm_gmii_i82543_readreg:	[mii interface function]
   9976  *
   9977  *	Read a PHY register on the GMII (i82543 version).
   9978  */
   9979 static int
   9980 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9981 {
   9982 	struct wm_softc *sc = device_private(dev);
   9983 	int rv;
   9984 
   9985 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9986 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9987 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9988 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9989 
   9990 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9991 	    device_xname(dev), phy, reg, rv));
   9992 
   9993 	return rv;
   9994 }
   9995 
   9996 /*
   9997  * wm_gmii_i82543_writereg:	[mii interface function]
   9998  *
   9999  *	Write a PHY register on the GMII (i82543 version).
   10000  */
   10001 static void
   10002 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10003 {
   10004 	struct wm_softc *sc = device_private(dev);
   10005 
   10006 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10007 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10008 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10009 	    (MII_COMMAND_START << 30), 32);
   10010 }
   10011 
   10012 /*
   10013  * wm_gmii_mdic_readreg:	[mii interface function]
   10014  *
   10015  *	Read a PHY register on the GMII.
   10016  */
   10017 static int
   10018 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10019 {
   10020 	struct wm_softc *sc = device_private(dev);
   10021 	uint32_t mdic = 0;
   10022 	int i, rv;
   10023 
   10024 	if (reg > MII_ADDRMASK) {
   10025 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10026 		    __func__, sc->sc_phytype, reg);
   10027 		reg &= MII_ADDRMASK;
   10028 	}
   10029 
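	/*
	 * Kick off the transaction by writing the opcode, PHY address and
	 * register address to MDIC, then poll for MDIC_READY; each poll
	 * below waits 50us.
	 */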
   10030 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10031 	    MDIC_REGADD(reg));
   10032 
   10033 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10034 		mdic = CSR_READ(sc, WMREG_MDIC);
   10035 		if (mdic & MDIC_READY)
   10036 			break;
   10037 		delay(50);
   10038 	}
   10039 
   10040 	if ((mdic & MDIC_READY) == 0) {
   10041 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10042 		    device_xname(dev), phy, reg);
   10043 		rv = 0;
   10044 	} else if (mdic & MDIC_E) {
   10045 #if 0 /* This is normal if no PHY is present. */
   10046 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10047 		    device_xname(dev), phy, reg);
   10048 #endif
   10049 		rv = 0;
   10050 	} else {
   10051 		rv = MDIC_DATA(mdic);
   10052 		if (rv == 0xffff)
   10053 			rv = 0;
   10054 	}
   10055 
   10056 	return rv;
   10057 }
   10058 
   10059 /*
   10060  * wm_gmii_mdic_writereg:	[mii interface function]
   10061  *
   10062  *	Write a PHY register on the GMII.
   10063  */
   10064 static void
   10065 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10066 {
   10067 	struct wm_softc *sc = device_private(dev);
   10068 	uint32_t mdic = 0;
   10069 	int i;
   10070 
   10071 	if (reg > MII_ADDRMASK) {
   10072 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10073 		    __func__, sc->sc_phytype, reg);
   10074 		reg &= MII_ADDRMASK;
   10075 	}
   10076 
   10077 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10078 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10079 
   10080 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10081 		mdic = CSR_READ(sc, WMREG_MDIC);
   10082 		if (mdic & MDIC_READY)
   10083 			break;
   10084 		delay(50);
   10085 	}
   10086 
   10087 	if ((mdic & MDIC_READY) == 0)
   10088 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10089 		    device_xname(dev), phy, reg);
   10090 	else if (mdic & MDIC_E)
   10091 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10092 		    device_xname(dev), phy, reg);
   10093 }
   10094 
   10095 /*
   10096  * wm_gmii_i82544_readreg:	[mii interface function]
   10097  *
   10098  *	Read a PHY register on the GMII.
   10099  */
   10100 static int
   10101 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10102 {
   10103 	struct wm_softc *sc = device_private(dev);
   10104 	int rv;
   10105 
   10106 	if (sc->phy.acquire(sc)) {
   10107 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10108 		return 0;
   10109 	}
   10110 
   10111 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10112 		switch (sc->sc_phytype) {
   10113 		case WMPHY_IGP:
   10114 		case WMPHY_IGP_2:
   10115 		case WMPHY_IGP_3:
   10116 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10117 			    reg);
   10118 			break;
   10119 		default:
   10120 #ifdef WM_DEBUG
   10121 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10122 			    __func__, sc->sc_phytype, reg);
   10123 #endif
   10124 			break;
   10125 		}
   10126 	}
   10127 
   10128 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10129 	sc->phy.release(sc);
   10130 
   10131 	return rv;
   10132 }
   10133 
   10134 /*
   10135  * wm_gmii_i82544_writereg:	[mii interface function]
   10136  *
   10137  *	Write a PHY register on the GMII.
   10138  */
   10139 static void
   10140 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10141 {
   10142 	struct wm_softc *sc = device_private(dev);
   10143 
   10144 	if (sc->phy.acquire(sc)) {
   10145 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10146 		return;
   10147 	}
   10148 
   10149 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10150 		switch (sc->sc_phytype) {
   10151 		case WMPHY_IGP:
   10152 		case WMPHY_IGP_2:
   10153 		case WMPHY_IGP_3:
   10154 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10155 			    reg);
   10156 			break;
   10157 		default:
   10158 #ifdef WM_DEBUG
   10159 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10160 			    __func__, sc->sc_phytype, reg);
   10161 #endif
   10162 			break;
   10163 		}
   10164 	}
   10165 
   10166 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10167 	sc->phy.release(sc);
   10168 }
   10169 
   10170 /*
   10171  * wm_gmii_i80003_readreg:	[mii interface function]
   10172  *
    10173  *	Read a PHY register on the Kumeran interface.
    10174  * This could be handled by the PHY layer if we didn't have to lock the
    10175  * resource ...
   10176  */
   10177 static int
   10178 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10179 {
   10180 	struct wm_softc *sc = device_private(dev);
   10181 	int page_select, temp;
   10182 	int rv;
   10183 
    10184 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   10185 		return 0;
   10186 
   10187 	if (sc->phy.acquire(sc)) {
   10188 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10189 		return 0;
   10190 	}
   10191 
   10192 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10193 		page_select = GG82563_PHY_PAGE_SELECT;
   10194 	else {
   10195 		/*
   10196 		 * Use Alternative Page Select register to access registers
   10197 		 * 30 and 31.
   10198 		 */
   10199 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10200 	}
   10201 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10202 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10203 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10204 		/*
    10205 		 * Wait an extra 200us to work around a bug in the ready bit
    10206 		 * of the MDIC register.
   10207 		 */
   10208 		delay(200);
   10209 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10210 			device_printf(dev, "%s failed\n", __func__);
   10211 			rv = 0; /* XXX */
   10212 			goto out;
   10213 		}
   10214 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10215 		delay(200);
   10216 	} else
   10217 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10218 
   10219 out:
   10220 	sc->phy.release(sc);
   10221 	return rv;
   10222 }
   10223 
   10224 /*
   10225  * wm_gmii_i80003_writereg:	[mii interface function]
   10226  *
    10227  *	Write a PHY register on the Kumeran interface.
    10228  * This could be handled by the PHY layer if we didn't have to lock the
    10229  * resource ...
   10230  */
   10231 static void
   10232 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10233 {
   10234 	struct wm_softc *sc = device_private(dev);
   10235 	int page_select, temp;
   10236 
    10237 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   10238 		return;
   10239 
   10240 	if (sc->phy.acquire(sc)) {
   10241 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10242 		return;
   10243 	}
   10244 
   10245 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10246 		page_select = GG82563_PHY_PAGE_SELECT;
   10247 	else {
   10248 		/*
   10249 		 * Use Alternative Page Select register to access registers
   10250 		 * 30 and 31.
   10251 		 */
   10252 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10253 	}
   10254 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10255 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10256 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10257 		/*
    10258 		 * Wait an extra 200us to work around a bug in the ready bit
    10259 		 * of the MDIC register.
   10260 		 */
   10261 		delay(200);
   10262 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10263 			device_printf(dev, "%s failed\n", __func__);
   10264 			goto out;
   10265 		}
   10266 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10267 		delay(200);
   10268 	} else
   10269 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10270 
   10271 out:
   10272 	sc->phy.release(sc);
   10273 }
   10274 
   10275 /*
   10276  * wm_gmii_bm_readreg:	[mii interface function]
   10277  *
    10278  *	Read a PHY register on the BM PHY (82567 and similar).
    10279  * This could be handled by the PHY layer if we didn't have to lock the
    10280  * resource ...
   10281  */
   10282 static int
   10283 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10284 {
   10285 	struct wm_softc *sc = device_private(dev);
   10286 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10287 	uint16_t val;
   10288 	int rv;
   10289 
   10290 	if (sc->phy.acquire(sc)) {
   10291 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10292 		return 0;
   10293 	}
   10294 
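	/*
	 * On BM PHYs, registers on pages >= 768, register 25 on page 0
	 * and register 31 are only reachable through PHY address 1.
	 */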
   10295 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10296 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10297 		    || (reg == 31)) ? 1 : phy;
   10298 	/* Page 800 works differently than the rest so it has its own func */
   10299 	if (page == BM_WUC_PAGE) {
   10300 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10301 		rv = val;
   10302 		goto release;
   10303 	}
   10304 
   10305 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10306 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10307 		    && (sc->sc_type != WM_T_82583))
   10308 			wm_gmii_mdic_writereg(dev, phy,
   10309 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10310 		else
   10311 			wm_gmii_mdic_writereg(dev, phy,
   10312 			    BME1000_PHY_PAGE_SELECT, page);
   10313 	}
   10314 
   10315 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10316 
   10317 release:
   10318 	sc->phy.release(sc);
   10319 	return rv;
   10320 }
   10321 
   10322 /*
   10323  * wm_gmii_bm_writereg:	[mii interface function]
   10324  *
    10325  *	Write a PHY register on the BM PHY (82567 and similar).
    10326  * This could be handled by the PHY layer if we didn't have to lock the
    10327  * resource ...
   10328  */
   10329 static void
   10330 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10331 {
   10332 	struct wm_softc *sc = device_private(dev);
   10333 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10334 
   10335 	if (sc->phy.acquire(sc)) {
   10336 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10337 		return;
   10338 	}
   10339 
   10340 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10341 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10342 		    || (reg == 31)) ? 1 : phy;
   10343 	/* Page 800 works differently than the rest so it has its own func */
   10344 	if (page == BM_WUC_PAGE) {
   10345 		uint16_t tmp;
   10346 
   10347 		tmp = val;
   10348 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10349 		goto release;
   10350 	}
   10351 
   10352 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10353 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10354 		    && (sc->sc_type != WM_T_82583))
   10355 			wm_gmii_mdic_writereg(dev, phy,
   10356 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10357 		else
   10358 			wm_gmii_mdic_writereg(dev, phy,
   10359 			    BME1000_PHY_PAGE_SELECT, page);
   10360 	}
   10361 
   10362 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10363 
   10364 release:
   10365 	sc->phy.release(sc);
   10366 }
   10367 
   10368 static void
   10369 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10370 {
   10371 	struct wm_softc *sc = device_private(dev);
   10372 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10373 	uint16_t wuce, reg;
   10374 
   10375 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10376 		device_xname(dev), __func__));
   10377 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10378 	if (sc->sc_type == WM_T_PCH) {
    10379 		/* XXX The e1000 driver does nothing here... why? */
   10380 	}
   10381 
   10382 	/*
   10383 	 * 1) Enable PHY wakeup register first.
   10384 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10385 	 */
   10386 
   10387 	/* Set page 769 */
   10388 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10389 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10390 
   10391 	/* Read WUCE and save it */
   10392 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10393 
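	/*
	 * Enable wakeup register access, making sure the ME and host
	 * wakeup enables don't get set as a side effect.
	 */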
   10394 	reg = wuce | BM_WUC_ENABLE_BIT;
   10395 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10396 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10397 
   10398 	/* Select page 800 */
   10399 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10400 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10401 
   10402 	/*
   10403 	 * 2) Access PHY wakeup register.
   10404 	 * See e1000_access_phy_wakeup_reg_bm.
   10405 	 */
   10406 
   10407 	/* Write page 800 */
   10408 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10409 
   10410 	if (rd)
   10411 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10412 	else
   10413 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10414 
   10415 	/*
   10416 	 * 3) Disable PHY wakeup register.
   10417 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10418 	 */
   10419 	/* Set page 769 */
   10420 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10421 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10422 
   10423 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10424 }
   10425 
   10426 /*
   10427  * wm_gmii_hv_readreg:	[mii interface function]
   10428  *
    10429  *	Read a PHY register on the HV (PCH and newer) PHY.
    10430  * This could be handled by the PHY layer if we didn't have to lock the
    10431  * resource ...
   10432  */
   10433 static int
   10434 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10435 {
   10436 	struct wm_softc *sc = device_private(dev);
   10437 	int rv;
   10438 
   10439 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10440 		device_xname(dev), __func__));
   10441 	if (sc->phy.acquire(sc)) {
   10442 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10443 		return 0;
   10444 	}
   10445 
   10446 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10447 	sc->phy.release(sc);
   10448 	return rv;
   10449 }
   10450 
   10451 static int
   10452 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10453 {
   10454 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10455 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10456 	uint16_t val;
   10457 	int rv;
   10458 
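	/* Registers on pages >= HV_INTC_FC_PAGE_START live on PHY address 1. */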
   10459 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10460 
   10461 	/* Page 800 works differently than the rest so it has its own func */
   10462 	if (page == BM_WUC_PAGE) {
   10463 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10464 		return val;
   10465 	}
   10466 
   10467 	/*
    10468 	 * Pages lower than 768 work differently from the rest, so they
    10469 	 * have their own function.
   10470 	 */
   10471 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10472 		printf("gmii_hv_readreg!!!\n");
   10473 		return 0;
   10474 	}
   10475 
   10476 	/*
   10477 	 * XXX I21[789] documents say that the SMBus Address register is at
   10478 	 * PHY address 01, Page 0 (not 768), Register 26.
   10479 	 */
   10480 	if (page == HV_INTC_FC_PAGE_START)
   10481 		page = 0;
   10482 
   10483 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10484 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10485 		    page << BME1000_PAGE_SHIFT);
   10486 	}
   10487 
   10488 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10489 	return rv;
   10490 }
   10491 
   10492 /*
   10493  * wm_gmii_hv_writereg:	[mii interface function]
   10494  *
    10495  *	Write a PHY register on the HV (PCH and newer) PHY.
    10496  * This could be handled by the PHY layer if we didn't have to lock the
    10497  * resource ...
   10498  */
   10499 static void
   10500 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10501 {
   10502 	struct wm_softc *sc = device_private(dev);
   10503 
   10504 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10505 		device_xname(dev), __func__));
   10506 
   10507 	if (sc->phy.acquire(sc)) {
   10508 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10509 		return;
   10510 	}
   10511 
   10512 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10513 	sc->phy.release(sc);
   10514 }
   10515 
   10516 static void
   10517 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10518 {
   10519 	struct wm_softc *sc = device_private(dev);
   10520 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10521 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10522 
   10523 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10524 
   10525 	/* Page 800 works differently than the rest so it has its own func */
   10526 	if (page == BM_WUC_PAGE) {
   10527 		uint16_t tmp;
   10528 
   10529 		tmp = val;
   10530 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10531 		return;
   10532 	}
   10533 
   10534 	/*
    10535 	 * Pages lower than 768 work differently from the rest, so they
    10536 	 * have their own function.
   10537 	 */
   10538 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10539 		printf("gmii_hv_writereg!!!\n");
   10540 		return;
   10541 	}
   10542 
   10543 	{
   10544 		/*
   10545 		 * XXX I21[789] documents say that the SMBus Address register
   10546 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10547 		 */
   10548 		if (page == HV_INTC_FC_PAGE_START)
   10549 			page = 0;
   10550 
   10551 		/*
   10552 		 * XXX Workaround MDIO accesses being disabled after entering
   10553 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10554 		 * register is set)
   10555 		 */
   10556 		if (sc->sc_phytype == WMPHY_82578) {
   10557 			struct mii_softc *child;
   10558 
   10559 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10560 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10561 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10562 			    && ((val & (1 << 11)) != 0)) {
   10563 				printf("XXX need workaround\n");
   10564 			}
   10565 		}
   10566 
   10567 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10568 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10569 			    page << BME1000_PAGE_SHIFT);
   10570 		}
   10571 	}
   10572 
   10573 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10574 }
   10575 
   10576 /*
   10577  * wm_gmii_82580_readreg:	[mii interface function]
   10578  *
   10579  *	Read a PHY register on the 82580 and I350.
   10580  * This could be handled by the PHY layer if we didn't have to lock the
    10581  * resource ...
   10582  */
   10583 static int
   10584 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10585 {
   10586 	struct wm_softc *sc = device_private(dev);
   10587 	int rv;
   10588 
   10589 	if (sc->phy.acquire(sc) != 0) {
   10590 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10591 		return 0;
   10592 	}
   10593 
   10594 #ifdef DIAGNOSTIC
   10595 	if (reg > MII_ADDRMASK) {
   10596 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10597 		    __func__, sc->sc_phytype, reg);
   10598 		reg &= MII_ADDRMASK;
   10599 	}
   10600 #endif
   10601 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10602 
   10603 	sc->phy.release(sc);
   10604 	return rv;
   10605 }
   10606 
   10607 /*
   10608  * wm_gmii_82580_writereg:	[mii interface function]
   10609  *
   10610  *	Write a PHY register on the 82580 and I350.
   10611  * This could be handled by the PHY layer if we didn't have to lock the
    10612  * resource ...
   10613  */
   10614 static void
   10615 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10616 {
   10617 	struct wm_softc *sc = device_private(dev);
   10618 
   10619 	if (sc->phy.acquire(sc) != 0) {
   10620 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10621 		return;
   10622 	}
   10623 
   10624 #ifdef DIAGNOSTIC
   10625 	if (reg > MII_ADDRMASK) {
   10626 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10627 		    __func__, sc->sc_phytype, reg);
   10628 		reg &= MII_ADDRMASK;
   10629 	}
   10630 #endif
   10631 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10632 
   10633 	sc->phy.release(sc);
   10634 }
   10635 
   10636 /*
   10637  * wm_gmii_gs40g_readreg:	[mii interface function]
   10638  *
    10639  *	Read a PHY register on the I210 and I211.
    10640  * This could be handled by the PHY layer if we didn't have to lock the
    10641  * resource ...
   10642  */
   10643 static int
   10644 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10645 {
   10646 	struct wm_softc *sc = device_private(dev);
   10647 	int page, offset;
   10648 	int rv;
   10649 
   10650 	/* Acquire semaphore */
   10651 	if (sc->phy.acquire(sc)) {
   10652 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10653 		return 0;
   10654 	}
   10655 
   10656 	/* Page select */
   10657 	page = reg >> GS40G_PAGE_SHIFT;
   10658 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10659 
   10660 	/* Read reg */
   10661 	offset = reg & GS40G_OFFSET_MASK;
   10662 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10663 
   10664 	sc->phy.release(sc);
   10665 	return rv;
   10666 }
   10667 
   10668 /*
   10669  * wm_gmii_gs40g_writereg:	[mii interface function]
   10670  *
   10671  *	Write a PHY register on the I210 and I211.
   10672  * This could be handled by the PHY layer if we didn't have to lock the
    10673  * resource ...
   10674  */
   10675 static void
   10676 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10677 {
   10678 	struct wm_softc *sc = device_private(dev);
   10679 	int page, offset;
   10680 
   10681 	/* Acquire semaphore */
   10682 	if (sc->phy.acquire(sc)) {
   10683 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10684 		return;
   10685 	}
   10686 
   10687 	/* Page select */
   10688 	page = reg >> GS40G_PAGE_SHIFT;
   10689 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10690 
   10691 	/* Write reg */
   10692 	offset = reg & GS40G_OFFSET_MASK;
   10693 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10694 
   10695 	/* Release semaphore */
   10696 	sc->phy.release(sc);
   10697 }
   10698 
   10699 /*
   10700  * wm_gmii_statchg:	[mii interface function]
   10701  *
   10702  *	Callback from MII layer when media changes.
   10703  */
   10704 static void
   10705 wm_gmii_statchg(struct ifnet *ifp)
   10706 {
   10707 	struct wm_softc *sc = ifp->if_softc;
   10708 	struct mii_data *mii = &sc->sc_mii;
   10709 
   10710 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10711 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10712 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10713 
   10714 	/*
   10715 	 * Get flow control negotiation result.
   10716 	 */
   10717 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10718 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10719 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10720 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10721 	}
   10722 
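	/*
	 * Program the TX/RX pause enables and the XON enable bit from the
	 * negotiated flow control flags.
	 */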
   10723 	if (sc->sc_flowflags & IFM_FLOW) {
   10724 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10725 			sc->sc_ctrl |= CTRL_TFCE;
   10726 			sc->sc_fcrtl |= FCRTL_XONE;
   10727 		}
   10728 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10729 			sc->sc_ctrl |= CTRL_RFCE;
   10730 	}
   10731 
   10732 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10733 		DPRINTF(WM_DEBUG_LINK,
   10734 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10735 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10736 	} else {
   10737 		DPRINTF(WM_DEBUG_LINK,
   10738 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10739 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10740 	}
   10741 
   10742 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10743 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10744 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10745 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10746 	if (sc->sc_type == WM_T_80003) {
   10747 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10748 		case IFM_1000_T:
   10749 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10750 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    10751 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10752 			break;
   10753 		default:
   10754 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10755 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    10756 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10757 			break;
   10758 		}
   10759 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10760 	}
   10761 }
   10762 
   10763 /* kumeran related (80003, ICH* and PCH*) */
   10764 
   10765 /*
   10766  * wm_kmrn_readreg:
   10767  *
   10768  *	Read a kumeran register
   10769  */
   10770 static int
   10771 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10772 {
   10773 	int rv;
   10774 
   10775 	if (sc->sc_type == WM_T_80003)
   10776 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10777 	else
   10778 		rv = sc->phy.acquire(sc);
   10779 	if (rv != 0) {
   10780 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10781 		    __func__);
   10782 		return rv;
   10783 	}
   10784 
   10785 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10786 
   10787 	if (sc->sc_type == WM_T_80003)
   10788 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10789 	else
   10790 		sc->phy.release(sc);
   10791 
   10792 	return rv;
   10793 }
   10794 
   10795 static int
   10796 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10797 {
   10798 
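	/*
	 * Post the register offset with the read-enable bit set, give the
	 * hardware 2us, then pull the data out of the low bits of the
	 * same register.
	 */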
   10799 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10800 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10801 	    KUMCTRLSTA_REN);
   10802 	CSR_WRITE_FLUSH(sc);
   10803 	delay(2);
   10804 
   10805 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10806 
   10807 	return 0;
   10808 }
   10809 
   10810 /*
   10811  * wm_kmrn_writereg:
   10812  *
   10813  *	Write a kumeran register
   10814  */
   10815 static int
   10816 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10817 {
   10818 	int rv;
   10819 
   10820 	if (sc->sc_type == WM_T_80003)
   10821 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10822 	else
   10823 		rv = sc->phy.acquire(sc);
   10824 	if (rv != 0) {
   10825 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10826 		    __func__);
   10827 		return rv;
   10828 	}
   10829 
   10830 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10831 
   10832 	if (sc->sc_type == WM_T_80003)
   10833 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10834 	else
   10835 		sc->phy.release(sc);
   10836 
   10837 	return rv;
   10838 }
   10839 
   10840 static int
   10841 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10842 {
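	/* For writes, the offset and data share a single KUMCTRLSTA access. */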
   10843 
   10844 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10845 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10846 
   10847 	return 0;
   10848 }
   10849 
   10850 /* SGMII related */
   10851 
   10852 /*
   10853  * wm_sgmii_uses_mdio
   10854  *
   10855  * Check whether the transaction is to the internal PHY or the external
   10856  * MDIO interface. Return true if it's MDIO.
   10857  */
   10858 static bool
   10859 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10860 {
   10861 	uint32_t reg;
   10862 	bool ismdio = false;
   10863 
   10864 	switch (sc->sc_type) {
   10865 	case WM_T_82575:
   10866 	case WM_T_82576:
   10867 		reg = CSR_READ(sc, WMREG_MDIC);
   10868 		ismdio = ((reg & MDIC_DEST) != 0);
   10869 		break;
   10870 	case WM_T_82580:
   10871 	case WM_T_I350:
   10872 	case WM_T_I354:
   10873 	case WM_T_I210:
   10874 	case WM_T_I211:
   10875 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10876 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10877 		break;
   10878 	default:
   10879 		break;
   10880 	}
   10881 
   10882 	return ismdio;
   10883 }
   10884 
   10885 /*
   10886  * wm_sgmii_readreg:	[mii interface function]
   10887  *
   10888  *	Read a PHY register on the SGMII
   10889  * This could be handled by the PHY layer if we didn't have to lock the
    10890  * resource ...
   10891  */
   10892 static int
   10893 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10894 {
   10895 	struct wm_softc *sc = device_private(dev);
   10896 	uint32_t i2ccmd;
   10897 	int i, rv;
   10898 
   10899 	if (sc->phy.acquire(sc)) {
   10900 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10901 		return 0;
   10902 	}
   10903 
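	/*
	 * SGMII PHY accesses go through the I2CCMD register: encode the
	 * register and PHY addresses together with the read opcode, then
	 * poll for I2CCMD_READY below.
	 */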
   10904 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10905 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10906 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10907 
   10908 	/* Poll the ready bit */
   10909 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10910 		delay(50);
   10911 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10912 		if (i2ccmd & I2CCMD_READY)
   10913 			break;
   10914 	}
   10915 	if ((i2ccmd & I2CCMD_READY) == 0)
   10916 		device_printf(dev, "I2CCMD Read did not complete\n");
   10917 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10918 		device_printf(dev, "I2CCMD Error bit set\n");
   10919 
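	/* Swap the data bytes back from I2C wire order. */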
   10920 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10921 
   10922 	sc->phy.release(sc);
   10923 	return rv;
   10924 }
   10925 
   10926 /*
   10927  * wm_sgmii_writereg:	[mii interface function]
   10928  *
   10929  *	Write a PHY register on the SGMII.
   10930  * This could be handled by the PHY layer if we didn't have to lock the
    10931  * resource ...
   10932  */
   10933 static void
   10934 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10935 {
   10936 	struct wm_softc *sc = device_private(dev);
   10937 	uint32_t i2ccmd;
   10938 	int i;
   10939 	int swapdata;
   10940 
   10941 	if (sc->phy.acquire(sc) != 0) {
   10942 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10943 		return;
   10944 	}
   10945 	/* Swap the data bytes for the I2C interface */
   10946 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10947 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10948 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   10949 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10950 
   10951 	/* Poll the ready bit */
   10952 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10953 		delay(50);
   10954 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10955 		if (i2ccmd & I2CCMD_READY)
   10956 			break;
   10957 	}
   10958 	if ((i2ccmd & I2CCMD_READY) == 0)
   10959 		device_printf(dev, "I2CCMD Write did not complete\n");
   10960 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10961 		device_printf(dev, "I2CCMD Error bit set\n");
   10962 
   10963 	sc->phy.release(sc);
   10964 }
   10965 
   10966 /* TBI related */
   10967 
   10968 /*
   10969  * wm_tbi_mediainit:
   10970  *
   10971  *	Initialize media for use on 1000BASE-X devices.
   10972  */
   10973 static void
   10974 wm_tbi_mediainit(struct wm_softc *sc)
   10975 {
   10976 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10977 	const char *sep = "";
   10978 
   10979 	if (sc->sc_type < WM_T_82543)
   10980 		sc->sc_tipg = TIPG_WM_DFLT;
   10981 	else
   10982 		sc->sc_tipg = TIPG_LG_DFLT;
   10983 
   10984 	sc->sc_tbi_serdes_anegticks = 5;
   10985 
   10986 	/* Initialize our media structures */
   10987 	sc->sc_mii.mii_ifp = ifp;
   10988 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10989 
   10990 	if ((sc->sc_type >= WM_T_82575)
   10991 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10992 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10993 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10994 	else
   10995 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10996 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10997 
   10998 	/*
   10999 	 * SWD Pins:
   11000 	 *
   11001 	 *	0 = Link LED (output)
   11002 	 *	1 = Loss Of Signal (input)
   11003 	 */
   11004 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11005 
   11006 	/* XXX Perhaps this is only for TBI */
   11007 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11008 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11009 
   11010 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11011 		sc->sc_ctrl &= ~CTRL_LRST;
   11012 
   11013 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11014 
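/*
 * Announce a supported medium on the console and add it to the ifmedia
 * list in a single step.
 */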
   11015 #define	ADD(ss, mm, dd)							\
   11016 do {									\
   11017 	aprint_normal("%s%s", sep, ss);					\
   11018 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11019 	sep = ", ";							\
   11020 } while (/*CONSTCOND*/0)
   11021 
   11022 	aprint_normal_dev(sc->sc_dev, "");
   11023 
   11024 	if (sc->sc_type == WM_T_I354) {
   11025 		uint32_t status;
   11026 
   11027 		status = CSR_READ(sc, WMREG_STATUS);
   11028 		if (((status & STATUS_2P5_SKU) != 0)
   11029 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    11030 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   11031 		} else
    11032 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   11033 	} else if (sc->sc_type == WM_T_82545) {
   11034 		/* Only 82545 is LX (XXX except SFP) */
   11035 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11036 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11037 	} else {
   11038 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11039 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11040 	}
   11041 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11042 	aprint_normal("\n");
   11043 
   11044 #undef ADD
   11045 
   11046 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11047 }
   11048 
   11049 /*
   11050  * wm_tbi_mediachange:	[ifmedia interface function]
   11051  *
   11052  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11053  */
   11054 static int
   11055 wm_tbi_mediachange(struct ifnet *ifp)
   11056 {
   11057 	struct wm_softc *sc = ifp->if_softc;
   11058 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11059 	uint32_t status;
   11060 	int i;
   11061 
   11062 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11063 		/* XXX need some work for >= 82571 and < 82575 */
   11064 		if (sc->sc_type < WM_T_82575)
   11065 			return 0;
   11066 	}
   11067 
   11068 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11069 	    || (sc->sc_type >= WM_T_82575))
   11070 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11071 
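	/*
	 * Build the transmit configuration word: always advertise
	 * autonegotiation, plus the duplex abilities selected by the
	 * media (both for IFM_AUTO), and symmetric/asymmetric pause if
	 * flow control is enabled.
	 */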
   11072 	sc->sc_ctrl &= ~CTRL_LRST;
   11073 	sc->sc_txcw = TXCW_ANE;
   11074 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11075 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11076 	else if (ife->ifm_media & IFM_FDX)
   11077 		sc->sc_txcw |= TXCW_FD;
   11078 	else
   11079 		sc->sc_txcw |= TXCW_HD;
   11080 
   11081 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11082 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11083 
    11084 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11085 		    device_xname(sc->sc_dev), sc->sc_txcw));
   11086 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11087 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11088 	CSR_WRITE_FLUSH(sc);
   11089 	delay(1000);
   11090 
   11091 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
    11092 	DPRINTF(WM_DEBUG_LINK, ("%s: i = 0x%x\n", device_xname(sc->sc_dev), i));
   11093 
   11094 	/*
   11095 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   11096 	 * optics detect a signal, 0 if they don't.
   11097 	 */
   11098 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   11099 		/* Have signal; wait for the link to come up. */
   11100 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11101 			delay(10000);
   11102 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11103 				break;
   11104 		}
   11105 
   11106 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
    11107 			    device_xname(sc->sc_dev), i));
   11108 
   11109 		status = CSR_READ(sc, WMREG_STATUS);
   11110 		DPRINTF(WM_DEBUG_LINK,
   11111 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
    11112 			device_xname(sc->sc_dev), status, STATUS_LU));
   11113 		if (status & STATUS_LU) {
   11114 			/* Link is up. */
   11115 			DPRINTF(WM_DEBUG_LINK,
   11116 			    ("%s: LINK: set media -> link up %s\n",
   11117 			    device_xname(sc->sc_dev),
   11118 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   11119 
   11120 			/*
   11121 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11122 			 * so we should update sc->sc_ctrl
   11123 			 */
   11124 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11125 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11126 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11127 			if (status & STATUS_FD)
   11128 				sc->sc_tctl |=
   11129 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11130 			else
   11131 				sc->sc_tctl |=
   11132 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11133 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11134 				sc->sc_fcrtl |= FCRTL_XONE;
   11135 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11136 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11137 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   11138 				      sc->sc_fcrtl);
   11139 			sc->sc_tbi_linkup = 1;
   11140 		} else {
   11141 			if (i == WM_LINKUP_TIMEOUT)
   11142 				wm_check_for_link(sc);
   11143 			/* Link is down. */
   11144 			DPRINTF(WM_DEBUG_LINK,
   11145 			    ("%s: LINK: set media -> link down\n",
   11146 			    device_xname(sc->sc_dev)));
   11147 			sc->sc_tbi_linkup = 0;
   11148 		}
   11149 	} else {
   11150 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11151 		    device_xname(sc->sc_dev)));
   11152 		sc->sc_tbi_linkup = 0;
   11153 	}
   11154 
   11155 	wm_tbi_serdes_set_linkled(sc);
   11156 
   11157 	return 0;
   11158 }
   11159 
   11160 /*
   11161  * wm_tbi_mediastatus:	[ifmedia interface function]
   11162  *
   11163  *	Get the current interface media status on a 1000BASE-X device.
   11164  */
   11165 static void
   11166 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11167 {
   11168 	struct wm_softc *sc = ifp->if_softc;
   11169 	uint32_t ctrl, status;
   11170 
   11171 	ifmr->ifm_status = IFM_AVALID;
   11172 	ifmr->ifm_active = IFM_ETHER;
   11173 
   11174 	status = CSR_READ(sc, WMREG_STATUS);
   11175 	if ((status & STATUS_LU) == 0) {
   11176 		ifmr->ifm_active |= IFM_NONE;
   11177 		return;
   11178 	}
   11179 
   11180 	ifmr->ifm_status |= IFM_ACTIVE;
   11181 	/* Only 82545 is LX */
   11182 	if (sc->sc_type == WM_T_82545)
   11183 		ifmr->ifm_active |= IFM_1000_LX;
   11184 	else
   11185 		ifmr->ifm_active |= IFM_1000_SX;
   11186 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11187 		ifmr->ifm_active |= IFM_FDX;
   11188 	else
   11189 		ifmr->ifm_active |= IFM_HDX;
   11190 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11191 	if (ctrl & CTRL_RFCE)
   11192 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11193 	if (ctrl & CTRL_TFCE)
   11194 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11195 }
   11196 
   11197 /* XXX TBI only */
   11198 static int
   11199 wm_check_for_link(struct wm_softc *sc)
   11200 {
   11201 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11202 	uint32_t rxcw;
   11203 	uint32_t ctrl;
   11204 	uint32_t status;
   11205 	uint32_t sig;
   11206 
   11207 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11208 		/* XXX need some work for >= 82571 */
   11209 		if (sc->sc_type >= WM_T_82571) {
   11210 			sc->sc_tbi_linkup = 1;
   11211 			return 0;
   11212 		}
   11213 	}
   11214 
   11215 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11216 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11217 	status = CSR_READ(sc, WMREG_STATUS);
   11218 
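	/*
	 * The expected "signal present" reading of SWDPIN(1): the bit is
	 * high on chips newer than the 82544 and low on older ones.
	 */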
   11219 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11220 
   11221 	DPRINTF(WM_DEBUG_LINK,
   11222 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11223 		device_xname(sc->sc_dev), __func__,
   11224 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11225 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11226 
   11227 	/*
   11228 	 * SWDPIN   LU RXCW
   11229 	 *      0    0    0
   11230 	 *      0    0    1	(should not happen)
   11231 	 *      0    1    0	(should not happen)
   11232 	 *      0    1    1	(should not happen)
   11233 	 *      1    0    0	Disable autonego and force linkup
   11234 	 *      1    0    1	got /C/ but not linkup yet
   11235 	 *      1    1    0	(linkup)
   11236 	 *      1    1    1	If IFM_AUTO, back to autonego
   11237 	 *
   11238 	 */
   11239 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11240 	    && ((status & STATUS_LU) == 0)
   11241 	    && ((rxcw & RXCW_C) == 0)) {
   11242 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11243 			__func__));
   11244 		sc->sc_tbi_linkup = 0;
   11245 		/* Disable auto-negotiation in the TXCW register */
   11246 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11247 
   11248 		/*
   11249 		 * Force link-up and also force full-duplex.
   11250 		 *
    11251 		 * NOTE: The hardware updated TFCE and RFCE in CTRL
    11252 		 * automatically, so sc->sc_ctrl must be refreshed here.
   11253 		 */
   11254 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11255 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11256 	} else if (((status & STATUS_LU) != 0)
   11257 	    && ((rxcw & RXCW_C) != 0)
   11258 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11259 		sc->sc_tbi_linkup = 1;
   11260 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11261 			__func__));
   11262 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11263 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11264 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11265 	    && ((rxcw & RXCW_C) != 0)) {
   11266 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11267 	} else {
   11268 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11269 			status));
   11270 	}
   11271 
   11272 	return 0;
   11273 }
   11274 
   11275 /*
   11276  * wm_tbi_tick:
   11277  *
   11278  *	Check the link on TBI devices.
   11279  *	This function acts as mii_tick().
   11280  */
   11281 static void
   11282 wm_tbi_tick(struct wm_softc *sc)
   11283 {
   11284 	struct mii_data *mii = &sc->sc_mii;
   11285 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11286 	uint32_t status;
   11287 
   11288 	KASSERT(WM_CORE_LOCKED(sc));
   11289 
   11290 	status = CSR_READ(sc, WMREG_STATUS);
   11291 
   11292 	/* XXX is this needed? */
   11293 	(void)CSR_READ(sc, WMREG_RXCW);
   11294 	(void)CSR_READ(sc, WMREG_CTRL);
   11295 
   11296 	/* set link status */
   11297 	if ((status & STATUS_LU) == 0) {
   11298 		DPRINTF(WM_DEBUG_LINK,
   11299 		    ("%s: LINK: checklink -> down\n",
   11300 			device_xname(sc->sc_dev)));
   11301 		sc->sc_tbi_linkup = 0;
   11302 	} else if (sc->sc_tbi_linkup == 0) {
   11303 		DPRINTF(WM_DEBUG_LINK,
   11304 		    ("%s: LINK: checklink -> up %s\n",
   11305 			device_xname(sc->sc_dev),
   11306 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11307 		sc->sc_tbi_linkup = 1;
   11308 		sc->sc_tbi_serdes_ticks = 0;
   11309 	}
   11310 
   11311 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11312 		goto setled;
   11313 
   11314 	if ((status & STATUS_LU) == 0) {
   11315 		sc->sc_tbi_linkup = 0;
   11316 		/* If the timer expired, retry autonegotiation */
   11317 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11318 		    && (++sc->sc_tbi_serdes_ticks
   11319 			>= sc->sc_tbi_serdes_anegticks)) {
   11320 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11321 			sc->sc_tbi_serdes_ticks = 0;
   11322 			/*
   11323 			 * Reset the link, and let autonegotiation do
   11324 			 * its thing
   11325 			 */
   11326 			sc->sc_ctrl |= CTRL_LRST;
   11327 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11328 			CSR_WRITE_FLUSH(sc);
   11329 			delay(1000);
   11330 			sc->sc_ctrl &= ~CTRL_LRST;
   11331 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11332 			CSR_WRITE_FLUSH(sc);
   11333 			delay(1000);
   11334 			CSR_WRITE(sc, WMREG_TXCW,
   11335 			    sc->sc_txcw & ~TXCW_ANE);
   11336 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11337 		}
   11338 	}
   11339 
   11340 setled:
   11341 	wm_tbi_serdes_set_linkled(sc);
   11342 }
   11343 
   11344 /* SERDES related */
   11345 static void
   11346 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11347 {
   11348 	uint32_t reg;
   11349 
   11350 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11351 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11352 		return;
   11353 
   11354 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11355 	reg |= PCS_CFG_PCS_EN;
   11356 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11357 
   11358 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11359 	reg &= ~CTRL_EXT_SWDPIN(3);
   11360 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11361 	CSR_WRITE_FLUSH(sc);
   11362 }
   11363 
   11364 static int
   11365 wm_serdes_mediachange(struct ifnet *ifp)
   11366 {
   11367 	struct wm_softc *sc = ifp->if_softc;
   11368 	bool pcs_autoneg = true; /* XXX */
   11369 	uint32_t ctrl_ext, pcs_lctl, reg;
   11370 
   11371 	/* XXX Currently, this function is not called on 8257[12] */
   11372 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11373 	    || (sc->sc_type >= WM_T_82575))
   11374 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11375 
   11376 	wm_serdes_power_up_link_82575(sc);
   11377 
   11378 	sc->sc_ctrl |= CTRL_SLU;
   11379 
   11380 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11381 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11382 
   11383 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11384 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11385 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11386 	case CTRL_EXT_LINK_MODE_SGMII:
   11387 		pcs_autoneg = true;
   11388 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11389 		break;
   11390 	case CTRL_EXT_LINK_MODE_1000KX:
   11391 		pcs_autoneg = false;
   11392 		/* FALLTHROUGH */
   11393 	default:
   11394 		if ((sc->sc_type == WM_T_82575)
   11395 		    || (sc->sc_type == WM_T_82576)) {
   11396 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11397 				pcs_autoneg = false;
   11398 		}
   11399 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11400 		    | CTRL_FRCFDX;
   11401 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11402 	}
   11403 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11404 
   11405 	if (pcs_autoneg) {
   11406 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11407 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11408 
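          		/* Advertise both symmetric and asymmetric PAUSE */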
   11409 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11410 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11411 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11412 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11413 	} else
   11414 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11415 
   11416 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11417 
   11419 	return 0;
   11420 }
   11421 
   11422 static void
   11423 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11424 {
   11425 	struct wm_softc *sc = ifp->if_softc;
   11426 	struct mii_data *mii = &sc->sc_mii;
   11427 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11428 	uint32_t pcs_adv, pcs_lpab, reg;
   11429 
   11430 	ifmr->ifm_status = IFM_AVALID;
   11431 	ifmr->ifm_active = IFM_ETHER;
   11432 
   11433 	/* Check PCS */
   11434 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11435 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11436 		ifmr->ifm_active |= IFM_NONE;
   11437 		sc->sc_tbi_linkup = 0;
   11438 		goto setled;
   11439 	}
   11440 
   11441 	sc->sc_tbi_linkup = 1;
   11442 	ifmr->ifm_status |= IFM_ACTIVE;
   11443 	if (sc->sc_type == WM_T_I354) {
   11444 		uint32_t status;
   11445 
   11446 		status = CSR_READ(sc, WMREG_STATUS);
   11447 		if (((status & STATUS_2P5_SKU) != 0)
   11448 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11449 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11450 		} else
   11451 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11452 	} else {
   11453 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11454 		case PCS_LSTS_SPEED_10:
   11455 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11456 			break;
   11457 		case PCS_LSTS_SPEED_100:
   11458 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11459 			break;
   11460 		case PCS_LSTS_SPEED_1000:
   11461 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11462 			break;
   11463 		default:
   11464 			device_printf(sc->sc_dev, "Unknown speed\n");
   11465 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11466 			break;
   11467 		}
   11468 	}
   11469 	if ((reg & PCS_LSTS_FDX) != 0)
   11470 		ifmr->ifm_active |= IFM_FDX;
   11471 	else
   11472 		ifmr->ifm_active |= IFM_HDX;
   11473 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11474 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11475 		/* Check flow */
   11476 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11477 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11478 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11479 			goto setled;
   11480 		}
   11481 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11482 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11483 		DPRINTF(WM_DEBUG_LINK,
   11484 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11485 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11486 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11487 			mii->mii_media_active |= IFM_FLOW
   11488 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11489 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11490 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11491 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11492 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11493 			mii->mii_media_active |= IFM_FLOW
   11494 			    | IFM_ETH_TXPAUSE;
   11495 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11496 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11497 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11498 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11499 			mii->mii_media_active |= IFM_FLOW
   11500 			    | IFM_ETH_RXPAUSE;
   11501 		}
   11502 	}
   11503 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11504 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11505 setled:
   11506 	wm_tbi_serdes_set_linkled(sc);
   11507 }
   11508 
   11509 /*
   11510  * wm_serdes_tick:
   11511  *
   11512  *	Check the link on serdes devices.
   11513  */
   11514 static void
   11515 wm_serdes_tick(struct wm_softc *sc)
   11516 {
   11517 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11518 	struct mii_data *mii = &sc->sc_mii;
   11519 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11520 	uint32_t reg;
   11521 
   11522 	KASSERT(WM_CORE_LOCKED(sc));
   11523 
   11524 	mii->mii_media_status = IFM_AVALID;
   11525 	mii->mii_media_active = IFM_ETHER;
   11526 
   11527 	/* Check PCS */
   11528 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11529 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11530 		mii->mii_media_status |= IFM_ACTIVE;
   11531 		sc->sc_tbi_linkup = 1;
   11532 		sc->sc_tbi_serdes_ticks = 0;
   11533 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11534 		if ((reg & PCS_LSTS_FDX) != 0)
   11535 			mii->mii_media_active |= IFM_FDX;
   11536 		else
   11537 			mii->mii_media_active |= IFM_HDX;
   11538 	} else {
   11539 		mii->mii_media_status |= IFM_NONE;
   11540 		sc->sc_tbi_linkup = 0;
   11541 		/* If the timer expired, retry autonegotiation */
   11542 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11543 		    && (++sc->sc_tbi_serdes_ticks
   11544 			>= sc->sc_tbi_serdes_anegticks)) {
   11545 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11546 			sc->sc_tbi_serdes_ticks = 0;
   11547 			/* XXX */
   11548 			wm_serdes_mediachange(ifp);
   11549 		}
   11550 	}
   11551 
   11552 	wm_tbi_serdes_set_linkled(sc);
   11553 }
   11554 
   11555 /* SFP related */
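
          /*
           * An SFP module is identified by reading its ID byte and its
           * Ethernet compliance flags over the I2CCMD interface; the flags
           * are then mapped to a wm(4) media type in
           * wm_sfp_get_media_type() below.
           */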
   11556 
   11557 static int
   11558 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11559 {
   11560 	uint32_t i2ccmd;
   11561 	int i;
   11562 
   11563 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11564 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11565 
   11566 	/* Poll the ready bit */
   11567 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11568 		delay(50);
   11569 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11570 		if (i2ccmd & I2CCMD_READY)
   11571 			break;
   11572 	}
   11573 	if ((i2ccmd & I2CCMD_READY) == 0)
   11574 		return -1;
   11575 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11576 		return -1;
   11577 
   11578 	*data = i2ccmd & 0x00ff;
   11579 
   11580 	return 0;
   11581 }
   11582 
   11583 static uint32_t
   11584 wm_sfp_get_media_type(struct wm_softc *sc)
   11585 {
   11586 	uint32_t ctrl_ext;
   11587 	uint8_t val = 0;
   11588 	int timeout = 3;
   11589 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11590 	int rv = -1;
   11591 
   11592 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11593 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11594 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11595 	CSR_WRITE_FLUSH(sc);
   11596 
   11597 	/* Read SFP module data */
   11598 	while (timeout) {
   11599 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11600 		if (rv == 0)
   11601 			break;
   11602 		delay(100*1000); /* XXX too big */
   11603 		timeout--;
   11604 	}
   11605 	if (rv != 0)
   11606 		goto out;
   11607 	switch (val) {
   11608 	case SFF_SFP_ID_SFF:
   11609 		aprint_normal_dev(sc->sc_dev,
   11610 		    "Module/Connector soldered to board\n");
   11611 		break;
   11612 	case SFF_SFP_ID_SFP:
   11613 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11614 		break;
   11615 	case SFF_SFP_ID_UNKNOWN:
   11616 		goto out;
   11617 	default:
   11618 		break;
   11619 	}
   11620 
   11621 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11622 	if (rv != 0) {
   11623 		goto out;
   11624 	}
   11625 
   11626 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11627 		mediatype = WM_MEDIATYPE_SERDES;
    11628 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11629 		sc->sc_flags |= WM_F_SGMII;
   11630 		mediatype = WM_MEDIATYPE_COPPER;
    11631 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11632 		sc->sc_flags |= WM_F_SGMII;
   11633 		mediatype = WM_MEDIATYPE_SERDES;
   11634 	}
   11635 
   11636 out:
   11637 	/* Restore I2C interface setting */
   11638 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11639 
   11640 	return mediatype;
   11641 }
   11642 
   11643 /*
   11644  * NVM related.
   11645  * Microwire, SPI (w/wo EERD) and Flash.
   11646  */
   11647 
   11648 /* Both spi and uwire */
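
          /*
           * wm_eeprom_sendbits() and wm_eeprom_recvbits() bit-bang the
           * serial EEPROM through the EECD register's DI, DO and SK bits;
           * chip select is handled by the callers.
           */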
   11649 
   11650 /*
   11651  * wm_eeprom_sendbits:
   11652  *
   11653  *	Send a series of bits to the EEPROM.
   11654  */
   11655 static void
   11656 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11657 {
   11658 	uint32_t reg;
   11659 	int x;
   11660 
   11661 	reg = CSR_READ(sc, WMREG_EECD);
   11662 
   11663 	for (x = nbits; x > 0; x--) {
   11664 		if (bits & (1U << (x - 1)))
   11665 			reg |= EECD_DI;
   11666 		else
   11667 			reg &= ~EECD_DI;
   11668 		CSR_WRITE(sc, WMREG_EECD, reg);
   11669 		CSR_WRITE_FLUSH(sc);
   11670 		delay(2);
   11671 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11672 		CSR_WRITE_FLUSH(sc);
   11673 		delay(2);
   11674 		CSR_WRITE(sc, WMREG_EECD, reg);
   11675 		CSR_WRITE_FLUSH(sc);
   11676 		delay(2);
   11677 	}
   11678 }
   11679 
   11680 /*
   11681  * wm_eeprom_recvbits:
   11682  *
   11683  *	Receive a series of bits from the EEPROM.
   11684  */
   11685 static void
   11686 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11687 {
   11688 	uint32_t reg, val;
   11689 	int x;
   11690 
   11691 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11692 
   11693 	val = 0;
   11694 	for (x = nbits; x > 0; x--) {
   11695 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11696 		CSR_WRITE_FLUSH(sc);
   11697 		delay(2);
   11698 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11699 			val |= (1U << (x - 1));
   11700 		CSR_WRITE(sc, WMREG_EECD, reg);
   11701 		CSR_WRITE_FLUSH(sc);
   11702 		delay(2);
   11703 	}
   11704 	*valp = val;
   11705 }
   11706 
   11707 /* Microwire */
   11708 
   11709 /*
   11710  * wm_nvm_read_uwire:
   11711  *
   11712  *	Read a word from the EEPROM using the MicroWire protocol.
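           *
           *	A word is read by asserting CS, shifting out the READ opcode
           *	and sc_nvm_addrbits address bits with wm_eeprom_sendbits(),
           *	then shifting 16 data bits back in with wm_eeprom_recvbits().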
   11713  */
   11714 static int
   11715 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11716 {
   11717 	uint32_t reg, val;
   11718 	int i;
   11719 
   11720 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11721 		device_xname(sc->sc_dev), __func__));
   11722 
   11723 	if (sc->nvm.acquire(sc) != 0)
   11724 		return -1;
   11725 
   11726 	for (i = 0; i < wordcnt; i++) {
   11727 		/* Clear SK and DI. */
   11728 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11729 		CSR_WRITE(sc, WMREG_EECD, reg);
   11730 
   11731 		/*
   11732 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11733 		 * and Xen.
   11734 		 *
   11735 		 * We use this workaround only for 82540 because qemu's
    11736 		 * e1000 acts as an 82540.
   11737 		 */
   11738 		if (sc->sc_type == WM_T_82540) {
   11739 			reg |= EECD_SK;
   11740 			CSR_WRITE(sc, WMREG_EECD, reg);
   11741 			reg &= ~EECD_SK;
   11742 			CSR_WRITE(sc, WMREG_EECD, reg);
   11743 			CSR_WRITE_FLUSH(sc);
   11744 			delay(2);
   11745 		}
   11746 		/* XXX: end of workaround */
   11747 
   11748 		/* Set CHIP SELECT. */
   11749 		reg |= EECD_CS;
   11750 		CSR_WRITE(sc, WMREG_EECD, reg);
   11751 		CSR_WRITE_FLUSH(sc);
   11752 		delay(2);
   11753 
   11754 		/* Shift in the READ command. */
   11755 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11756 
   11757 		/* Shift in address. */
   11758 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11759 
   11760 		/* Shift out the data. */
   11761 		wm_eeprom_recvbits(sc, &val, 16);
   11762 		data[i] = val & 0xffff;
   11763 
   11764 		/* Clear CHIP SELECT. */
   11765 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11766 		CSR_WRITE(sc, WMREG_EECD, reg);
   11767 		CSR_WRITE_FLUSH(sc);
   11768 		delay(2);
   11769 	}
   11770 
   11771 	sc->nvm.release(sc);
   11772 	return 0;
   11773 }
   11774 
   11775 /* SPI */
   11776 
   11777 /*
   11778  * Set SPI and FLASH related information from the EECD register.
   11779  * For 82541 and 82547, the word size is taken from EEPROM.
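           *
           * The EECD size field is an exponent: e.g. (a sketch, assuming
           * NVM_WORD_SIZE_BASE_SHIFT is 6) a field value of 2 gives
           * size = 2 + 6 = 8, hence a word size of 1 << 8 = 256 words.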
   11780  */
   11781 static int
   11782 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11783 {
   11784 	int size;
   11785 	uint32_t reg;
   11786 	uint16_t data;
   11787 
   11788 	reg = CSR_READ(sc, WMREG_EECD);
   11789 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11790 
   11791 	/* Read the size of NVM from EECD by default */
   11792 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11793 	switch (sc->sc_type) {
   11794 	case WM_T_82541:
   11795 	case WM_T_82541_2:
   11796 	case WM_T_82547:
   11797 	case WM_T_82547_2:
   11798 		/* Set dummy value to access EEPROM */
   11799 		sc->sc_nvm_wordsize = 64;
   11800 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11801 			aprint_error_dev(sc->sc_dev,
   11802 			    "%s: failed to read EEPROM size\n", __func__);
   11803 		}
   11804 		reg = data;
   11805 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11806 		if (size == 0)
   11807 			size = 6; /* 64 word size */
   11808 		else
   11809 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11810 		break;
   11811 	case WM_T_80003:
   11812 	case WM_T_82571:
   11813 	case WM_T_82572:
   11814 	case WM_T_82573: /* SPI case */
   11815 	case WM_T_82574: /* SPI case */
   11816 	case WM_T_82583: /* SPI case */
   11817 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11818 		if (size > 14)
   11819 			size = 14;
   11820 		break;
   11821 	case WM_T_82575:
   11822 	case WM_T_82576:
   11823 	case WM_T_82580:
   11824 	case WM_T_I350:
   11825 	case WM_T_I354:
   11826 	case WM_T_I210:
   11827 	case WM_T_I211:
   11828 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11829 		if (size > 15)
   11830 			size = 15;
   11831 		break;
   11832 	default:
   11833 		aprint_error_dev(sc->sc_dev,
   11834 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11835 		return -1;
   11836 		break;
   11837 	}
   11838 
   11839 	sc->sc_nvm_wordsize = 1 << size;
   11840 
   11841 	return 0;
   11842 }
   11843 
   11844 /*
   11845  * wm_nvm_ready_spi:
   11846  *
   11847  *	Wait for a SPI EEPROM to be ready for commands.
   11848  */
   11849 static int
   11850 wm_nvm_ready_spi(struct wm_softc *sc)
   11851 {
   11852 	uint32_t val;
   11853 	int usec;
   11854 
   11855 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11856 		device_xname(sc->sc_dev), __func__));
   11857 
   11858 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11859 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11860 		wm_eeprom_recvbits(sc, &val, 8);
   11861 		if ((val & SPI_SR_RDY) == 0)
   11862 			break;
   11863 	}
   11864 	if (usec >= SPI_MAX_RETRIES) {
    11865 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   11866 		return -1;
   11867 	}
   11868 	return 0;
   11869 }
   11870 
   11871 /*
   11872  * wm_nvm_read_spi:
   11873  *
    11874  *	Read a word from the EEPROM using the SPI protocol.
   11875  */
   11876 static int
   11877 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11878 {
   11879 	uint32_t reg, val;
   11880 	int i;
   11881 	uint8_t opc;
   11882 	int rv = 0;
   11883 
   11884 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11885 		device_xname(sc->sc_dev), __func__));
   11886 
   11887 	if (sc->nvm.acquire(sc) != 0)
   11888 		return -1;
   11889 
   11890 	/* Clear SK and CS. */
   11891 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11892 	CSR_WRITE(sc, WMREG_EECD, reg);
   11893 	CSR_WRITE_FLUSH(sc);
   11894 	delay(2);
   11895 
   11896 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11897 		goto out;
   11898 
   11899 	/* Toggle CS to flush commands. */
   11900 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11901 	CSR_WRITE_FLUSH(sc);
   11902 	delay(2);
   11903 	CSR_WRITE(sc, WMREG_EECD, reg);
   11904 	CSR_WRITE_FLUSH(sc);
   11905 	delay(2);
   11906 
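          	/*
          	 * Small SPI parts take only 8 address bits; on those the
          	 * ninth address bit (words >= 128) is carried in the opcode
          	 * as SPI_OPC_A8.  The part expects a byte address, hence
          	 * word << 1.
          	 */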
   11907 	opc = SPI_OPC_READ;
   11908 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11909 		opc |= SPI_OPC_A8;
   11910 
   11911 	wm_eeprom_sendbits(sc, opc, 8);
   11912 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11913 
   11914 	for (i = 0; i < wordcnt; i++) {
   11915 		wm_eeprom_recvbits(sc, &val, 16);
   11916 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11917 	}
   11918 
   11919 	/* Raise CS and clear SK. */
   11920 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11921 	CSR_WRITE(sc, WMREG_EECD, reg);
   11922 	CSR_WRITE_FLUSH(sc);
   11923 	delay(2);
   11924 
   11925 out:
   11926 	sc->nvm.release(sc);
   11927 	return rv;
   11928 }
   11929 
   11930 /* Using with EERD */
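
          /*
           * The EERD register provides a register-based read path: write
           * the word address together with EERD_START, poll for EERD_DONE,
           * then take the result from the register's data field.
           */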
   11931 
   11932 static int
   11933 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11934 {
   11935 	uint32_t attempts = 100000;
   11936 	uint32_t i, reg = 0;
   11937 	int32_t done = -1;
   11938 
   11939 	for (i = 0; i < attempts; i++) {
   11940 		reg = CSR_READ(sc, rw);
   11941 
   11942 		if (reg & EERD_DONE) {
   11943 			done = 0;
   11944 			break;
   11945 		}
   11946 		delay(5);
   11947 	}
   11948 
   11949 	return done;
   11950 }
   11951 
   11952 static int
   11953 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   11954 {
   11955 	int i, eerd = 0;
   11956 	int rv = 0;
   11957 
   11958 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11959 		device_xname(sc->sc_dev), __func__));
   11960 
   11961 	if (sc->nvm.acquire(sc) != 0)
   11962 		return -1;
   11963 
   11964 	for (i = 0; i < wordcnt; i++) {
   11965 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11966 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11967 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11968 		if (rv != 0) {
   11969 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   11970 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   11971 			break;
   11972 		}
   11973 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11974 	}
   11975 
   11976 	sc->nvm.release(sc);
   11977 	return rv;
   11978 }
   11979 
   11980 /* Flash */
   11981 
   11982 static int
   11983 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11984 {
   11985 	uint32_t eecd;
   11986 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11987 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11988 	uint32_t nvm_dword = 0;
   11989 	uint8_t sig_byte = 0;
    11990 	int rv;
   11991 
   11992 	switch (sc->sc_type) {
   11993 	case WM_T_PCH_SPT:
   11994 	case WM_T_PCH_CNP:
   11995 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   11996 		act_offset = ICH_NVM_SIG_WORD * 2;
   11997 
   11998 		/* set bank to 0 in case flash read fails. */
   11999 		*bank = 0;
   12000 
   12001 		/* Check bank 0 */
   12002 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12003 		if (rv != 0)
   12004 			return rv;
   12005 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12006 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12007 			*bank = 0;
   12008 			return 0;
   12009 		}
   12010 
   12011 		/* Check bank 1 */
    12012 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12013 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    12014 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12015 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12016 			*bank = 1;
   12017 			return 0;
   12018 		}
   12019 		aprint_error_dev(sc->sc_dev,
   12020 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12021 		return -1;
   12022 	case WM_T_ICH8:
   12023 	case WM_T_ICH9:
   12024 		eecd = CSR_READ(sc, WMREG_EECD);
   12025 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12026 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12027 			return 0;
   12028 		}
   12029 		/* FALLTHROUGH */
   12030 	default:
   12031 		/* Default to 0 */
   12032 		*bank = 0;
   12033 
   12034 		/* Check bank 0 */
   12035 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12036 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12037 			*bank = 0;
   12038 			return 0;
   12039 		}
   12040 
   12041 		/* Check bank 1 */
   12042 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12043 		    &sig_byte);
   12044 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12045 			*bank = 1;
   12046 			return 0;
   12047 		}
   12048 	}
   12049 
   12050 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12051 		device_xname(sc->sc_dev)));
   12052 	return -1;
   12053 }
   12054 
   12055 /******************************************************************************
   12056  * This function does initial flash setup so that a new read/write/erase cycle
   12057  * can be started.
   12058  *
   12059  * sc - The pointer to the hw structure
   12060  ****************************************************************************/
   12061 static int32_t
   12062 wm_ich8_cycle_init(struct wm_softc *sc)
   12063 {
   12064 	uint16_t hsfsts;
   12065 	int32_t error = 1;
   12066 	int32_t i     = 0;
   12067 
   12068 	if (sc->sc_type >= WM_T_PCH_SPT)
   12069 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12070 	else
   12071 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12072 
    12073 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   12074 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   12075 		return error;
   12076 	}
   12077 
    12078 	/* Clear FCERR and DAEL in HW status by writing 1s */
   12080 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12081 
   12082 	if (sc->sc_type >= WM_T_PCH_SPT)
   12083 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12084 	else
   12085 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12086 
   12087 	/*
    12088 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12089 	 * check against in order to start a new cycle, or the FDONE bit
    12090 	 * should be changed in the hardware so that it is 1 after a
    12091 	 * hardware reset, which could then be used to tell whether a
    12092 	 * cycle is in progress or has been completed.  We should also
    12093 	 * have a software semaphore to guard FDONE or the cycle-in-
    12094 	 * progress bit so that accesses by two threads are serialized,
    12095 	 * or so that two threads don't start a cycle at the same time.
   12096 	 */
   12097 
   12098 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12099 		/*
   12100 		 * There is no cycle running at present, so we can start a
   12101 		 * cycle
   12102 		 */
   12103 
   12104 		/* Begin by setting Flash Cycle Done. */
   12105 		hsfsts |= HSFSTS_DONE;
   12106 		if (sc->sc_type >= WM_T_PCH_SPT)
   12107 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12108 			    hsfsts & 0xffffUL);
   12109 		else
   12110 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12111 		error = 0;
   12112 	} else {
   12113 		/*
    12114 		 * Otherwise poll for some time so that the current cycle
    12115 		 * has a chance to end before giving up.
   12116 		 */
   12117 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12118 			if (sc->sc_type >= WM_T_PCH_SPT)
   12119 				hsfsts = ICH8_FLASH_READ32(sc,
   12120 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12121 			else
   12122 				hsfsts = ICH8_FLASH_READ16(sc,
   12123 				    ICH_FLASH_HSFSTS);
   12124 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12125 				error = 0;
   12126 				break;
   12127 			}
   12128 			delay(1);
   12129 		}
   12130 		if (error == 0) {
   12131 			/*
    12132 			 * The previous cycle ended before the timeout;
    12133 			 * now set the Flash Cycle Done bit.
   12134 			 */
   12135 			hsfsts |= HSFSTS_DONE;
   12136 			if (sc->sc_type >= WM_T_PCH_SPT)
   12137 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12138 				    hsfsts & 0xffffUL);
   12139 			else
   12140 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12141 				    hsfsts);
   12142 		}
   12143 	}
   12144 	return error;
   12145 }
   12146 
   12147 /******************************************************************************
   12148  * This function starts a flash cycle and waits for its completion
   12149  *
   12150  * sc - The pointer to the hw structure
   12151  ****************************************************************************/
   12152 static int32_t
   12153 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12154 {
   12155 	uint16_t hsflctl;
   12156 	uint16_t hsfsts;
   12157 	int32_t error = 1;
   12158 	uint32_t i = 0;
   12159 
   12160 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12161 	if (sc->sc_type >= WM_T_PCH_SPT)
   12162 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12163 	else
   12164 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12165 	hsflctl |= HSFCTL_GO;
   12166 	if (sc->sc_type >= WM_T_PCH_SPT)
   12167 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12168 		    (uint32_t)hsflctl << 16);
   12169 	else
   12170 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12171 
   12172 	/* Wait till FDONE bit is set to 1 */
   12173 	do {
   12174 		if (sc->sc_type >= WM_T_PCH_SPT)
   12175 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12176 			    & 0xffffUL;
   12177 		else
   12178 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12179 		if (hsfsts & HSFSTS_DONE)
   12180 			break;
   12181 		delay(1);
   12182 		i++;
   12183 	} while (i < timeout);
    12184 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12185 		error = 0;
   12186 
   12187 	return error;
   12188 }
   12189 
   12190 /******************************************************************************
   12191  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12192  *
   12193  * sc - The pointer to the hw structure
   12194  * index - The index of the byte or word to read.
    12195  * size - Size of data to read, 1=byte, 2=word, 4=dword
   12196  * data - Pointer to the word to store the value read.
   12197  *****************************************************************************/
   12198 static int32_t
   12199 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12200     uint32_t size, uint32_t *data)
   12201 {
   12202 	uint16_t hsfsts;
   12203 	uint16_t hsflctl;
   12204 	uint32_t flash_linear_address;
   12205 	uint32_t flash_data = 0;
   12206 	int32_t error = 1;
   12207 	int32_t count = 0;
   12208 
    12209 	if (size < 1 || size > 4 || data == NULL ||
   12210 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12211 		return error;
   12212 
   12213 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12214 	    sc->sc_ich8_flash_base;
   12215 
   12216 	do {
   12217 		delay(1);
   12218 		/* Steps */
   12219 		error = wm_ich8_cycle_init(sc);
   12220 		if (error)
   12221 			break;
   12222 
   12223 		if (sc->sc_type >= WM_T_PCH_SPT)
   12224 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12225 			    >> 16;
   12226 		else
   12227 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12228 		/* The byte count field holds size - 1: 0b = 1 byte, 11b = 4 bytes */
   12229 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12230 		    & HSFCTL_BCOUNT_MASK;
   12231 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12232 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12233 			/*
    12234 			 * In SPT, this register is in LAN memory space, not
    12235 			 * flash.  Therefore, only 32-bit access is supported.
   12236 			 */
   12237 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12238 			    (uint32_t)hsflctl << 16);
   12239 		} else
   12240 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12241 
   12242 		/*
   12243 		 * Write the last 24 bits of index into Flash Linear address
   12244 		 * field in Flash Address
   12245 		 */
    12246 		/* TODO: maybe check the index against the size of the flash */
   12247 
   12248 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12249 
   12250 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12251 
   12252 		/*
    12253 		 * If FCERR is set to 1, clear it and retry the whole
    12254 		 * sequence a few more times; otherwise read in (shift in)
    12255 		 * the Flash Data0 register, least significant byte
    12256 		 * first.
   12257 		 */
   12258 		if (error == 0) {
   12259 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12260 			if (size == 1)
   12261 				*data = (uint8_t)(flash_data & 0x000000FF);
   12262 			else if (size == 2)
   12263 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12264 			else if (size == 4)
   12265 				*data = (uint32_t)flash_data;
   12266 			break;
   12267 		} else {
   12268 			/*
   12269 			 * If we've gotten here, then things are probably
   12270 			 * completely hosed, but if the error condition is
   12271 			 * detected, it won't hurt to give it another try...
   12272 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12273 			 */
   12274 			if (sc->sc_type >= WM_T_PCH_SPT)
   12275 				hsfsts = ICH8_FLASH_READ32(sc,
   12276 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12277 			else
   12278 				hsfsts = ICH8_FLASH_READ16(sc,
   12279 				    ICH_FLASH_HSFSTS);
   12280 
   12281 			if (hsfsts & HSFSTS_ERR) {
   12282 				/* Repeat for some time before giving up. */
   12283 				continue;
   12284 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12285 				break;
   12286 		}
   12287 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12288 
   12289 	return error;
   12290 }
   12291 
   12292 /******************************************************************************
   12293  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12294  *
   12295  * sc - pointer to wm_hw structure
   12296  * index - The index of the byte to read.
   12297  * data - Pointer to a byte to store the value read.
   12298  *****************************************************************************/
   12299 static int32_t
   12300 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12301 {
   12302 	int32_t status;
   12303 	uint32_t word = 0;
   12304 
   12305 	status = wm_read_ich8_data(sc, index, 1, &word);
   12306 	if (status == 0)
   12307 		*data = (uint8_t)word;
   12308 	else
   12309 		*data = 0;
   12310 
   12311 	return status;
   12312 }
   12313 
   12314 /******************************************************************************
   12315  * Reads a word from the NVM using the ICH8 flash access registers.
   12316  *
   12317  * sc - pointer to wm_hw structure
   12318  * index - The starting byte index of the word to read.
   12319  * data - Pointer to a word to store the value read.
   12320  *****************************************************************************/
   12321 static int32_t
   12322 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12323 {
   12324 	int32_t status;
   12325 	uint32_t word = 0;
   12326 
   12327 	status = wm_read_ich8_data(sc, index, 2, &word);
   12328 	if (status == 0)
   12329 		*data = (uint16_t)word;
   12330 	else
   12331 		*data = 0;
   12332 
   12333 	return status;
   12334 }
   12335 
   12336 /******************************************************************************
   12337  * Reads a dword from the NVM using the ICH8 flash access registers.
   12338  *
   12339  * sc - pointer to wm_hw structure
   12340  * index - The starting byte index of the word to read.
   12341  * data - Pointer to a word to store the value read.
   12342  *****************************************************************************/
   12343 static int32_t
   12344 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12345 {
   12346 	int32_t status;
   12347 
   12348 	status = wm_read_ich8_data(sc, index, 4, data);
   12349 	return status;
   12350 }
   12351 
   12352 /******************************************************************************
   12353  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12354  * register.
   12355  *
   12356  * sc - Struct containing variables accessed by shared code
   12357  * offset - offset of word in the EEPROM to read
   12358  * data - word read from the EEPROM
   12359  * words - number of words to read
   12360  *****************************************************************************/
   12361 static int
   12362 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12363 {
   12364 	int32_t  rv = 0;
   12365 	uint32_t flash_bank = 0;
   12366 	uint32_t act_offset = 0;
   12367 	uint32_t bank_offset = 0;
   12368 	uint16_t word = 0;
   12369 	uint16_t i = 0;
   12370 
   12371 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12372 		device_xname(sc->sc_dev), __func__));
   12373 
   12374 	if (sc->nvm.acquire(sc) != 0)
   12375 		return -1;
   12376 
   12377 	/*
   12378 	 * We need to know which is the valid flash bank.  In the event
   12379 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12380 	 * managing flash_bank.  So it cannot be trusted and needs
   12381 	 * to be updated with each read.
   12382 	 */
   12383 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12384 	if (rv) {
   12385 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12386 			device_xname(sc->sc_dev)));
   12387 		flash_bank = 0;
   12388 	}
   12389 
   12390 	/*
   12391 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12392 	 * size
   12393 	 */
   12394 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12395 
   12396 	for (i = 0; i < words; i++) {
   12397 		/* The NVM part needs a byte offset, hence * 2 */
   12398 		act_offset = bank_offset + ((offset + i) * 2);
   12399 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12400 		if (rv) {
   12401 			aprint_error_dev(sc->sc_dev,
   12402 			    "%s: failed to read NVM\n", __func__);
   12403 			break;
   12404 		}
   12405 		data[i] = word;
   12406 	}
   12407 
   12408 	sc->nvm.release(sc);
   12409 	return rv;
   12410 }
   12411 
   12412 /******************************************************************************
   12413  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12414  * register.
   12415  *
   12416  * sc - Struct containing variables accessed by shared code
   12417  * offset - offset of word in the EEPROM to read
   12418  * data - word read from the EEPROM
   12419  * words - number of words to read
   12420  *****************************************************************************/
   12421 static int
   12422 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12423 {
   12424 	int32_t  rv = 0;
   12425 	uint32_t flash_bank = 0;
   12426 	uint32_t act_offset = 0;
   12427 	uint32_t bank_offset = 0;
   12428 	uint32_t dword = 0;
   12429 	uint16_t i = 0;
   12430 
   12431 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12432 		device_xname(sc->sc_dev), __func__));
   12433 
   12434 	if (sc->nvm.acquire(sc) != 0)
   12435 		return -1;
   12436 
   12437 	/*
   12438 	 * We need to know which is the valid flash bank.  In the event
   12439 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12440 	 * managing flash_bank.  So it cannot be trusted and needs
   12441 	 * to be updated with each read.
   12442 	 */
   12443 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12444 	if (rv) {
   12445 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12446 			device_xname(sc->sc_dev)));
   12447 		flash_bank = 0;
   12448 	}
   12449 
   12450 	/*
   12451 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12452 	 * size
   12453 	 */
   12454 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12455 
   12456 	for (i = 0; i < words; i++) {
   12457 		/* The NVM part needs a byte offset, hence * 2 */
   12458 		act_offset = bank_offset + ((offset + i) * 2);
   12459 		/* but we must read dword aligned, so mask ... */
   12460 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12461 		if (rv) {
   12462 			aprint_error_dev(sc->sc_dev,
   12463 			    "%s: failed to read NVM\n", __func__);
   12464 			break;
   12465 		}
   12466 		/* ... and pick out low or high word */
   12467 		if ((act_offset & 0x2) == 0)
   12468 			data[i] = (uint16_t)(dword & 0xFFFF);
   12469 		else
   12470 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12471 	}
   12472 
   12473 	sc->nvm.release(sc);
   12474 	return rv;
   12475 }
   12476 
   12477 /* iNVM */
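
          /*
           * The I210/I211 iNVM is a small one-time-programmable area read
           * through the WM_INVM_DATA_REG() registers.  Each dword carries
           * a record type; a word-autoload record holds a word address and
           * that word's data.
           */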
   12478 
   12479 static int
   12480 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12481 {
   12482 	int32_t  rv = 0;
   12483 	uint32_t invm_dword;
   12484 	uint16_t i;
   12485 	uint8_t record_type, word_address;
   12486 
   12487 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12488 		device_xname(sc->sc_dev), __func__));
   12489 
   12490 	for (i = 0; i < INVM_SIZE; i++) {
   12491 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12492 		/* Get record type */
   12493 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12494 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12495 			break;
   12496 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12497 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12498 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12499 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12500 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12501 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12502 			if (word_address == address) {
   12503 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12504 				rv = 0;
   12505 				break;
   12506 			}
   12507 		}
   12508 	}
   12509 
   12510 	return rv;
   12511 }
   12512 
   12513 static int
   12514 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12515 {
   12516 	int rv = 0;
   12517 	int i;
   12518 
   12519 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12520 		device_xname(sc->sc_dev), __func__));
   12521 
   12522 	if (sc->nvm.acquire(sc) != 0)
   12523 		return -1;
   12524 
   12525 	for (i = 0; i < words; i++) {
   12526 		switch (offset + i) {
   12527 		case NVM_OFF_MACADDR:
   12528 		case NVM_OFF_MACADDR1:
   12529 		case NVM_OFF_MACADDR2:
   12530 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12531 			if (rv != 0) {
   12532 				data[i] = 0xffff;
   12533 				rv = -1;
   12534 			}
   12535 			break;
   12536 		case NVM_OFF_CFG2:
   12537 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12538 			if (rv != 0) {
   12539 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12540 				rv = 0;
   12541 			}
   12542 			break;
   12543 		case NVM_OFF_CFG4:
   12544 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12545 			if (rv != 0) {
   12546 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12547 				rv = 0;
   12548 			}
   12549 			break;
   12550 		case NVM_OFF_LED_1_CFG:
   12551 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12552 			if (rv != 0) {
   12553 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12554 				rv = 0;
   12555 			}
   12556 			break;
   12557 		case NVM_OFF_LED_0_2_CFG:
   12558 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12559 			if (rv != 0) {
   12560 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12561 				rv = 0;
   12562 			}
   12563 			break;
   12564 		case NVM_OFF_ID_LED_SETTINGS:
   12565 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12566 			if (rv != 0) {
   12567 				*data = ID_LED_RESERVED_FFFF;
   12568 				rv = 0;
   12569 			}
   12570 			break;
   12571 		default:
   12572 			DPRINTF(WM_DEBUG_NVM,
   12573 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12574 			*data = NVM_RESERVED_WORD;
   12575 			break;
   12576 		}
   12577 	}
   12578 
   12579 	sc->nvm.release(sc);
   12580 	return rv;
   12581 }
   12582 
    12583 /* Lock, detect NVM type, validate checksum, get version and read */
   12584 
   12585 static int
   12586 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12587 {
   12588 	uint32_t eecd = 0;
   12589 
   12590 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12591 	    || sc->sc_type == WM_T_82583) {
   12592 		eecd = CSR_READ(sc, WMREG_EECD);
   12593 
   12594 		/* Isolate bits 15 & 16 */
   12595 		eecd = ((eecd >> 15) & 0x03);
   12596 
   12597 		/* If both bits are set, device is Flash type */
   12598 		if (eecd == 0x03)
   12599 			return 0;
   12600 	}
   12601 	return 1;
   12602 }
   12603 
   12604 static int
   12605 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12606 {
   12607 	uint32_t eec;
   12608 
   12609 	eec = CSR_READ(sc, WMREG_EEC);
   12610 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12611 		return 1;
   12612 
   12613 	return 0;
   12614 }
   12615 
   12616 /*
   12617  * wm_nvm_validate_checksum
   12618  *
   12619  * The checksum is defined as the sum of the first 64 (16 bit) words.
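            * The words must sum to NVM_CHECKSUM (0xBABA) in 16-bit
            * arithmetic: e.g. if the first 63 words sum to 0x1234, the
            * checksum word is programmed to 0xBABA - 0x1234 = 0xA886.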
   12620  */
   12621 static int
   12622 wm_nvm_validate_checksum(struct wm_softc *sc)
   12623 {
   12624 	uint16_t checksum;
   12625 	uint16_t eeprom_data;
   12626 #ifdef WM_DEBUG
   12627 	uint16_t csum_wordaddr, valid_checksum;
   12628 #endif
   12629 	int i;
   12630 
   12631 	checksum = 0;
   12632 
   12633 	/* Don't check for I211 */
   12634 	if (sc->sc_type == WM_T_I211)
   12635 		return 0;
   12636 
   12637 #ifdef WM_DEBUG
   12638 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12639 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12640 		csum_wordaddr = NVM_OFF_COMPAT;
   12641 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12642 	} else {
   12643 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12644 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12645 	}
   12646 
   12647 	/* Dump EEPROM image for debug */
   12648 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12649 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12650 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12651 		/* XXX PCH_SPT? */
   12652 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12653 		if ((eeprom_data & valid_checksum) == 0) {
   12654 			DPRINTF(WM_DEBUG_NVM,
    12655 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12656 				device_xname(sc->sc_dev), eeprom_data,
   12657 				    valid_checksum));
   12658 		}
   12659 	}
   12660 
   12661 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12662 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12663 		for (i = 0; i < NVM_SIZE; i++) {
   12664 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12665 				printf("XXXX ");
   12666 			else
   12667 				printf("%04hx ", eeprom_data);
   12668 			if (i % 8 == 7)
   12669 				printf("\n");
   12670 		}
   12671 	}
   12672 
   12673 #endif /* WM_DEBUG */
   12674 
   12675 	for (i = 0; i < NVM_SIZE; i++) {
   12676 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12677 			return 1;
   12678 		checksum += eeprom_data;
   12679 	}
   12680 
   12681 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12682 #ifdef WM_DEBUG
   12683 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12684 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12685 #endif
   12686 	}
   12687 
   12688 	return 0;
   12689 }
   12690 
   12691 static void
   12692 wm_nvm_version_invm(struct wm_softc *sc)
   12693 {
   12694 	uint32_t dword;
   12695 
   12696 	/*
    12697 	 * Linux's code to decode the version is very strange, so we don't
    12698 	 * follow that algorithm and just use word 61 as the document
    12699 	 * describes.  It may not be perfect, though...
   12700 	 *
   12701 	 * Example:
   12702 	 *
    12703 	 *   Word61: 00800030 -> Version 0.6 (the I211 spec update mentions 0.6)
   12704 	 */
   12705 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12706 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12707 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12708 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12709 }
   12710 
   12711 static void
   12712 wm_nvm_version(struct wm_softc *sc)
   12713 {
   12714 	uint16_t major, minor, build, patch;
   12715 	uint16_t uid0, uid1;
   12716 	uint16_t nvm_data;
   12717 	uint16_t off;
   12718 	bool check_version = false;
   12719 	bool check_optionrom = false;
   12720 	bool have_build = false;
   12721 	bool have_uid = true;
   12722 
   12723 	/*
   12724 	 * Version format:
   12725 	 *
   12726 	 * XYYZ
   12727 	 * X0YZ
   12728 	 * X0YY
   12729 	 *
   12730 	 * Example:
   12731 	 *
    12732 	 *	82571	0x50a2	5.10.2?	(the spec update mentions 5.6-5.10)
    12733 	 *	82571	0x50a6	5.10.6?
    12734 	 *	82572	0x506a	5.6.10?
    12735 	 *	82572EI	0x5069	5.6.9?
    12736 	 *	82574L	0x1080	1.8.0?	(the spec update mentions 2.1.4)
    12737 	 *		0x2013	2.1.3?
    12738 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
   12739 	 */
   12740 
   12741 	/*
   12742 	 * XXX
    12743 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    12744 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12745 	 */
   12746 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12747 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12748 		have_uid = false;
   12749 
   12750 	switch (sc->sc_type) {
   12751 	case WM_T_82571:
   12752 	case WM_T_82572:
   12753 	case WM_T_82574:
   12754 	case WM_T_82583:
   12755 		check_version = true;
   12756 		check_optionrom = true;
   12757 		have_build = true;
   12758 		break;
   12759 	case WM_T_82575:
   12760 	case WM_T_82576:
   12761 	case WM_T_82580:
   12762 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12763 			check_version = true;
   12764 		break;
   12765 	case WM_T_I211:
   12766 		wm_nvm_version_invm(sc);
   12767 		have_uid = false;
   12768 		goto printver;
   12769 	case WM_T_I210:
   12770 		if (!wm_nvm_flash_presence_i210(sc)) {
   12771 			wm_nvm_version_invm(sc);
   12772 			have_uid = false;
   12773 			goto printver;
   12774 		}
   12775 		/* FALLTHROUGH */
   12776 	case WM_T_I350:
   12777 	case WM_T_I354:
   12778 		check_version = true;
   12779 		check_optionrom = true;
   12780 		break;
   12781 	default:
   12782 		return;
   12783 	}
   12784 	if (check_version
   12785 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12786 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12787 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12788 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12789 			build = nvm_data & NVM_BUILD_MASK;
   12790 			have_build = true;
   12791 		} else
   12792 			minor = nvm_data & 0x00ff;
   12793 
    12794 		/* Treat the hex nibbles as decimal digits (e.g. 0x22 -> 22) */
   12795 		minor = (minor / 16) * 10 + (minor % 16);
   12796 		sc->sc_nvm_ver_major = major;
   12797 		sc->sc_nvm_ver_minor = minor;
   12798 
   12799 printver:
   12800 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12801 		    sc->sc_nvm_ver_minor);
   12802 		if (have_build) {
   12803 			sc->sc_nvm_ver_build = build;
   12804 			aprint_verbose(".%d", build);
   12805 		}
   12806 	}
   12807 
    12808 	/* Assume the Option ROM area is above NVM_SIZE */
   12809 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12810 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12811 		/* Option ROM Version */
   12812 		if ((off != 0x0000) && (off != 0xffff)) {
   12813 			int rv;
   12814 
   12815 			off += NVM_COMBO_VER_OFF;
   12816 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12817 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12818 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12819 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12820 				/* 16bits */
   12821 				major = uid0 >> 8;
   12822 				build = (uid0 << 8) | (uid1 >> 8);
   12823 				patch = uid1 & 0x00ff;
   12824 				aprint_verbose(", option ROM Version %d.%d.%d",
   12825 				    major, build, patch);
   12826 			}
   12827 		}
   12828 	}
   12829 
   12830 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12831 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12832 }
   12833 
   12834 /*
   12835  * wm_nvm_read:
   12836  *
   12837  *	Read data from the serial EEPROM.
   12838  */
   12839 static int
   12840 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12841 {
   12842 	int rv;
   12843 
   12844 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12845 		device_xname(sc->sc_dev), __func__));
   12846 
   12847 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12848 		return -1;
   12849 
   12850 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12851 
   12852 	return rv;
   12853 }
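
          #if 0
          /*
           * Usage sketch (not compiled): a hypothetical caller reading the
           * three MAC address words.  Any other word offset is read the
           * same way.
           */
          static void
          wm_nvm_read_example(struct wm_softc *sc)
          {
          	uint16_t macw[3];
          
          	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, macw) != 0)
          		aprint_error_dev(sc->sc_dev, "NVM read failed\n");
          }
          #endif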
   12854 
   12855 /*
   12856  * Hardware semaphores.
    12857  * Very complex...
   12858  */
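
          /*
           * Several acquire/release pairs are provided below: the plain
           * EECD request/grant handshake, the SWSM SMBI/SWESMBI semaphore
           * and the SW/FW sync register used to share resources with
           * firmware.
           */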
   12859 
   12860 static int
   12861 wm_get_null(struct wm_softc *sc)
   12862 {
   12863 
   12864 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12865 		device_xname(sc->sc_dev), __func__));
   12866 	return 0;
   12867 }
   12868 
   12869 static void
   12870 wm_put_null(struct wm_softc *sc)
   12871 {
   12872 
   12873 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12874 		device_xname(sc->sc_dev), __func__));
   12875 	return;
   12876 }
   12877 
   12878 static int
   12879 wm_get_eecd(struct wm_softc *sc)
   12880 {
   12881 	uint32_t reg;
   12882 	int x;
   12883 
   12884 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12885 		device_xname(sc->sc_dev), __func__));
   12886 
   12887 	reg = CSR_READ(sc, WMREG_EECD);
   12888 
   12889 	/* Request EEPROM access. */
   12890 	reg |= EECD_EE_REQ;
   12891 	CSR_WRITE(sc, WMREG_EECD, reg);
   12892 
   12893 	/* ..and wait for it to be granted. */
   12894 	for (x = 0; x < 1000; x++) {
   12895 		reg = CSR_READ(sc, WMREG_EECD);
   12896 		if (reg & EECD_EE_GNT)
   12897 			break;
   12898 		delay(5);
   12899 	}
   12900 	if ((reg & EECD_EE_GNT) == 0) {
   12901 		aprint_error_dev(sc->sc_dev,
   12902 		    "could not acquire EEPROM GNT\n");
   12903 		reg &= ~EECD_EE_REQ;
   12904 		CSR_WRITE(sc, WMREG_EECD, reg);
   12905 		return -1;
   12906 	}
   12907 
   12908 	return 0;
   12909 }
   12910 
   12911 static void
   12912 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12913 {
   12914 
   12915 	*eecd |= EECD_SK;
   12916 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12917 	CSR_WRITE_FLUSH(sc);
   12918 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12919 		delay(1);
   12920 	else
   12921 		delay(50);
   12922 }
   12923 
   12924 static void
   12925 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12926 {
   12927 
   12928 	*eecd &= ~EECD_SK;
   12929 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12930 	CSR_WRITE_FLUSH(sc);
   12931 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12932 		delay(1);
   12933 	else
   12934 		delay(50);
   12935 }
   12936 
   12937 static void
   12938 wm_put_eecd(struct wm_softc *sc)
   12939 {
   12940 	uint32_t reg;
   12941 
   12942 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12943 		device_xname(sc->sc_dev), __func__));
   12944 
   12945 	/* Stop nvm */
   12946 	reg = CSR_READ(sc, WMREG_EECD);
   12947 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12948 		/* Pull CS high */
   12949 		reg |= EECD_CS;
   12950 		wm_nvm_eec_clock_lower(sc, &reg);
   12951 	} else {
   12952 		/* CS on Microwire is active-high */
   12953 		reg &= ~(EECD_CS | EECD_DI);
   12954 		CSR_WRITE(sc, WMREG_EECD, reg);
   12955 		wm_nvm_eec_clock_raise(sc, &reg);
   12956 		wm_nvm_eec_clock_lower(sc, &reg);
   12957 	}
   12958 
   12959 	reg = CSR_READ(sc, WMREG_EECD);
   12960 	reg &= ~EECD_EE_REQ;
   12961 	CSR_WRITE(sc, WMREG_EECD, reg);
   12962 
   12963 	return;
   12964 }
   12965 
   12966 /*
   12967  * Get hardware semaphore.
   12968  * Same as e1000_get_hw_semaphore_generic()
   12969  */
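/*
 * Acquisition is two-stage: the SMBI bit arbitrates among software
 * agents, then the SWESMBI bit arbitrates between software and
 * firmware.  SWESMBI is confirmed by writing the bit and reading it
 * back.  Both bits are released by wm_put_swsm_semaphore().
 */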
   12970 static int
   12971 wm_get_swsm_semaphore(struct wm_softc *sc)
   12972 {
   12973 	int32_t timeout;
   12974 	uint32_t swsm;
   12975 
   12976 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12977 		device_xname(sc->sc_dev), __func__));
   12978 	KASSERT(sc->sc_nvm_wordsize > 0);
   12979 
   12980 retry:
   12981 	/* Get the SW semaphore. */
   12982 	timeout = sc->sc_nvm_wordsize + 1;
   12983 	while (timeout) {
   12984 		swsm = CSR_READ(sc, WMREG_SWSM);
   12985 
   12986 		if ((swsm & SWSM_SMBI) == 0)
   12987 			break;
   12988 
   12989 		delay(50);
   12990 		timeout--;
   12991 	}
   12992 
   12993 	if (timeout == 0) {
   12994 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12995 			/*
   12996 			 * In rare circumstances, the SW semaphore may already
   12997 			 * be held unintentionally. Clear the semaphore once
   12998 			 * before giving up.
   12999 			 */
   13000 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13001 			wm_put_swsm_semaphore(sc);
   13002 			goto retry;
   13003 		}
   13004 		aprint_error_dev(sc->sc_dev,
   13005 		    "could not acquire SWSM SMBI\n");
   13006 		return 1;
   13007 	}
   13008 
   13009 	/* Get the FW semaphore. */
   13010 	timeout = sc->sc_nvm_wordsize + 1;
   13011 	while (timeout) {
   13012 		swsm = CSR_READ(sc, WMREG_SWSM);
   13013 		swsm |= SWSM_SWESMBI;
   13014 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13015 		/* If we managed to set the bit we got the semaphore. */
   13016 		swsm = CSR_READ(sc, WMREG_SWSM);
   13017 		if (swsm & SWSM_SWESMBI)
   13018 			break;
   13019 
   13020 		delay(50);
   13021 		timeout--;
   13022 	}
   13023 
   13024 	if (timeout == 0) {
   13025 		aprint_error_dev(sc->sc_dev,
   13026 		    "could not acquire SWSM SWESMBI\n");
   13027 		/* Release semaphores */
   13028 		wm_put_swsm_semaphore(sc);
   13029 		return 1;
   13030 	}
   13031 	return 0;
   13032 }
   13033 
   13034 /*
   13035  * Put hardware semaphore.
   13036  * Same as e1000_put_hw_semaphore_generic()
   13037  */
   13038 static void
   13039 wm_put_swsm_semaphore(struct wm_softc *sc)
   13040 {
   13041 	uint32_t swsm;
   13042 
   13043 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13044 		device_xname(sc->sc_dev), __func__));
   13045 
   13046 	swsm = CSR_READ(sc, WMREG_SWSM);
   13047 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13048 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13049 }
   13050 
   13051 /*
   13052  * Get SW/FW semaphore.
   13053  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13054  */
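/*
 * Each resource protected by SW_FW_SYNC has one software-owned bit and
 * one firmware-owned bit; the register itself is guarded by the SWSM
 * semaphore, which is taken around every read-modify-write below.
 */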
   13055 static int
   13056 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13057 {
   13058 	uint32_t swfw_sync;
   13059 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13060 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    13061 	int i, timeout;
   13062 
   13063 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13064 		device_xname(sc->sc_dev), __func__));
   13065 
   13066 	if (sc->sc_type == WM_T_80003)
   13067 		timeout = 50;
   13068 	else
   13069 		timeout = 200;
   13070 
    13071 	for (i = 0; i < timeout; i++) {
   13072 		if (wm_get_swsm_semaphore(sc)) {
   13073 			aprint_error_dev(sc->sc_dev,
   13074 			    "%s: failed to get semaphore\n",
   13075 			    __func__);
   13076 			return 1;
   13077 		}
   13078 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13079 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13080 			swfw_sync |= swmask;
   13081 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13082 			wm_put_swsm_semaphore(sc);
   13083 			return 0;
   13084 		}
   13085 		wm_put_swsm_semaphore(sc);
   13086 		delay(5000);
   13087 	}
   13088 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13089 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13090 	return 1;
   13091 }
   13092 
   13093 static void
   13094 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13095 {
   13096 	uint32_t swfw_sync;
   13097 
   13098 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13099 		device_xname(sc->sc_dev), __func__));
   13100 
   13101 	while (wm_get_swsm_semaphore(sc) != 0)
   13102 		continue;
   13103 
   13104 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13105 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13106 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13107 
   13108 	wm_put_swsm_semaphore(sc);
   13109 }
   13110 
   13111 static int
   13112 wm_get_nvm_80003(struct wm_softc *sc)
   13113 {
   13114 	int rv;
   13115 
   13116 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13117 		device_xname(sc->sc_dev), __func__));
   13118 
   13119 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13120 		aprint_error_dev(sc->sc_dev,
   13121 		    "%s: failed to get semaphore(SWFW)\n",
   13122 		    __func__);
   13123 		return rv;
   13124 	}
   13125 
   13126 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13127 	    && (rv = wm_get_eecd(sc)) != 0) {
   13128 		aprint_error_dev(sc->sc_dev,
   13129 		    "%s: failed to get semaphore(EECD)\n",
   13130 		    __func__);
   13131 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13132 		return rv;
   13133 	}
   13134 
   13135 	return 0;
   13136 }
   13137 
   13138 static void
   13139 wm_put_nvm_80003(struct wm_softc *sc)
   13140 {
   13141 
   13142 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13143 		device_xname(sc->sc_dev), __func__));
   13144 
   13145 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13146 		wm_put_eecd(sc);
   13147 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13148 }
   13149 
   13150 static int
   13151 wm_get_nvm_82571(struct wm_softc *sc)
   13152 {
   13153 	int rv;
   13154 
   13155 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13156 		device_xname(sc->sc_dev), __func__));
   13157 
   13158 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13159 		return rv;
   13160 
   13161 	switch (sc->sc_type) {
   13162 	case WM_T_82573:
   13163 		break;
   13164 	default:
   13165 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13166 			rv = wm_get_eecd(sc);
   13167 		break;
   13168 	}
   13169 
   13170 	if (rv != 0) {
   13171 		aprint_error_dev(sc->sc_dev,
   13172 		    "%s: failed to get semaphore\n",
   13173 		    __func__);
   13174 		wm_put_swsm_semaphore(sc);
   13175 	}
   13176 
   13177 	return rv;
   13178 }
   13179 
   13180 static void
   13181 wm_put_nvm_82571(struct wm_softc *sc)
   13182 {
   13183 
   13184 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13185 		device_xname(sc->sc_dev), __func__));
   13186 
   13187 	switch (sc->sc_type) {
   13188 	case WM_T_82573:
   13189 		break;
   13190 	default:
   13191 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13192 			wm_put_eecd(sc);
   13193 		break;
   13194 	}
   13195 
   13196 	wm_put_swsm_semaphore(sc);
   13197 }
   13198 
   13199 static int
   13200 wm_get_phy_82575(struct wm_softc *sc)
   13201 {
   13202 
   13203 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13204 		device_xname(sc->sc_dev), __func__));
   13205 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13206 }
   13207 
   13208 static void
   13209 wm_put_phy_82575(struct wm_softc *sc)
   13210 {
   13211 
   13212 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13213 		device_xname(sc->sc_dev), __func__));
   13214 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13215 }
   13216 
   13217 static int
   13218 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13219 {
   13220 	uint32_t ext_ctrl;
    13221 	int timeout;
   13222 
   13223 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13224 		device_xname(sc->sc_dev), __func__));
   13225 
   13226 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13227 	for (timeout = 0; timeout < 200; timeout++) {
   13228 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13229 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13230 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13231 
   13232 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13233 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13234 			return 0;
   13235 		delay(5000);
   13236 	}
   13237 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13238 	    device_xname(sc->sc_dev), ext_ctrl);
   13239 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13240 	return 1;
   13241 }
   13242 
   13243 static void
   13244 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13245 {
   13246 	uint32_t ext_ctrl;
   13247 
   13248 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13249 		device_xname(sc->sc_dev), __func__));
   13250 
   13251 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13252 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13253 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13254 
   13255 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13256 }
   13257 
   13258 static int
   13259 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13260 {
   13261 	uint32_t ext_ctrl;
   13262 	int timeout;
   13263 
   13264 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13265 		device_xname(sc->sc_dev), __func__));
   13266 	mutex_enter(sc->sc_ich_phymtx);
   13267 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13268 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13269 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13270 			break;
   13271 		delay(1000);
   13272 	}
   13273 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13274 		printf("%s: SW has already locked the resource\n",
   13275 		    device_xname(sc->sc_dev));
   13276 		goto out;
   13277 	}
   13278 
   13279 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13280 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13281 	for (timeout = 0; timeout < 1000; timeout++) {
   13282 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13283 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13284 			break;
   13285 		delay(1000);
   13286 	}
   13287 	if (timeout >= 1000) {
   13288 		printf("%s: failed to acquire semaphore\n",
   13289 		    device_xname(sc->sc_dev));
   13290 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13291 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13292 		goto out;
   13293 	}
   13294 	return 0;
   13295 
   13296 out:
   13297 	mutex_exit(sc->sc_ich_phymtx);
   13298 	return 1;
   13299 }
   13300 
   13301 static void
   13302 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13303 {
   13304 	uint32_t ext_ctrl;
   13305 
   13306 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13307 		device_xname(sc->sc_dev), __func__));
   13308 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13309 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13310 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13311 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13312 	} else {
   13313 		printf("%s: Semaphore unexpectedly released\n",
   13314 		    device_xname(sc->sc_dev));
   13315 	}
   13316 
   13317 	mutex_exit(sc->sc_ich_phymtx);
   13318 }
   13319 
   13320 static int
   13321 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13322 {
   13323 
   13324 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13325 		device_xname(sc->sc_dev), __func__));
   13326 	mutex_enter(sc->sc_ich_nvmmtx);
   13327 
   13328 	return 0;
   13329 }
   13330 
   13331 static void
   13332 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13333 {
   13334 
   13335 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13336 		device_xname(sc->sc_dev), __func__));
   13337 	mutex_exit(sc->sc_ich_nvmmtx);
   13338 }
   13339 
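/*
 * Get the 82573 MDIO ownership flag in EXTCNFCTR.  Acquisition is
 * confirmed by writing the ownership bit and reading it back, retried
 * in 2ms steps for up to WM_MDIO_OWNERSHIP_TIMEOUT iterations.
 */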
   13340 static int
   13341 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13342 {
   13343 	int i = 0;
   13344 	uint32_t reg;
   13345 
   13346 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13347 		device_xname(sc->sc_dev), __func__));
   13348 
   13349 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13350 	do {
   13351 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13352 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13353 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13354 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13355 			break;
   13356 		delay(2*1000);
   13357 		i++;
   13358 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13359 
   13360 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13361 		wm_put_hw_semaphore_82573(sc);
   13362 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13363 		    device_xname(sc->sc_dev));
   13364 		return -1;
   13365 	}
   13366 
   13367 	return 0;
   13368 }
   13369 
   13370 static void
   13371 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13372 {
   13373 	uint32_t reg;
   13374 
   13375 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13376 		device_xname(sc->sc_dev), __func__));
   13377 
   13378 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13379 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13380 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13381 }
   13382 
   13383 /*
   13384  * Management mode and power management related subroutines.
   13385  * BMC, AMT, suspend/resume and EEE.
   13386  */
   13387 
   13388 #ifdef WM_WOL
   13389 static int
   13390 wm_check_mng_mode(struct wm_softc *sc)
   13391 {
   13392 	int rv;
   13393 
   13394 	switch (sc->sc_type) {
   13395 	case WM_T_ICH8:
   13396 	case WM_T_ICH9:
   13397 	case WM_T_ICH10:
   13398 	case WM_T_PCH:
   13399 	case WM_T_PCH2:
   13400 	case WM_T_PCH_LPT:
   13401 	case WM_T_PCH_SPT:
   13402 	case WM_T_PCH_CNP:
   13403 		rv = wm_check_mng_mode_ich8lan(sc);
   13404 		break;
   13405 	case WM_T_82574:
   13406 	case WM_T_82583:
   13407 		rv = wm_check_mng_mode_82574(sc);
   13408 		break;
   13409 	case WM_T_82571:
   13410 	case WM_T_82572:
   13411 	case WM_T_82573:
   13412 	case WM_T_80003:
   13413 		rv = wm_check_mng_mode_generic(sc);
   13414 		break;
   13415 	default:
    13416 		/* nothing to do */
   13417 		rv = 0;
   13418 		break;
   13419 	}
   13420 
   13421 	return rv;
   13422 }
   13423 
   13424 static int
   13425 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13426 {
   13427 	uint32_t fwsm;
   13428 
   13429 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13430 
   13431 	if (((fwsm & FWSM_FW_VALID) != 0)
   13432 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13433 		return 1;
   13434 
   13435 	return 0;
   13436 }
   13437 
   13438 static int
   13439 wm_check_mng_mode_82574(struct wm_softc *sc)
   13440 {
   13441 	uint16_t data;
   13442 
   13443 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13444 
   13445 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13446 		return 1;
   13447 
   13448 	return 0;
   13449 }
   13450 
   13451 static int
   13452 wm_check_mng_mode_generic(struct wm_softc *sc)
   13453 {
   13454 	uint32_t fwsm;
   13455 
   13456 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13457 
   13458 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13459 		return 1;
   13460 
   13461 	return 0;
   13462 }
   13463 #endif /* WM_WOL */
   13464 
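/*
 * Check whether management pass-through is enabled: the TCO receive
 * bit must be set and, depending on the device, the firmware mode or
 * NVM configuration must indicate pass-through mode.  Returns 1 if so.
 */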
   13465 static int
   13466 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13467 {
   13468 	uint32_t manc, fwsm, factps;
   13469 
   13470 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13471 		return 0;
   13472 
   13473 	manc = CSR_READ(sc, WMREG_MANC);
   13474 
   13475 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13476 		device_xname(sc->sc_dev), manc));
   13477 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13478 		return 0;
   13479 
   13480 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13481 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13482 		factps = CSR_READ(sc, WMREG_FACTPS);
   13483 		if (((factps & FACTPS_MNGCG) == 0)
   13484 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13485 			return 1;
   13486 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13487 		uint16_t data;
   13488 
   13489 		factps = CSR_READ(sc, WMREG_FACTPS);
   13490 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13491 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13492 			device_xname(sc->sc_dev), factps, data));
   13493 		if (((factps & FACTPS_MNGCG) == 0)
   13494 		    && ((data & NVM_CFG2_MNGM_MASK)
   13495 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13496 			return 1;
   13497 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13498 	    && ((manc & MANC_ASF_EN) == 0))
   13499 		return 1;
   13500 
   13501 	return 0;
   13502 }
   13503 
   13504 static bool
   13505 wm_phy_resetisblocked(struct wm_softc *sc)
   13506 {
   13507 	bool blocked = false;
   13508 	uint32_t reg;
   13509 	int i = 0;
   13510 
   13511 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13512 		device_xname(sc->sc_dev), __func__));
   13513 
   13514 	switch (sc->sc_type) {
   13515 	case WM_T_ICH8:
   13516 	case WM_T_ICH9:
   13517 	case WM_T_ICH10:
   13518 	case WM_T_PCH:
   13519 	case WM_T_PCH2:
   13520 	case WM_T_PCH_LPT:
   13521 	case WM_T_PCH_SPT:
   13522 	case WM_T_PCH_CNP:
   13523 		do {
   13524 			reg = CSR_READ(sc, WMREG_FWSM);
   13525 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13526 				blocked = true;
   13527 				delay(10*1000);
   13528 				continue;
   13529 			}
   13530 			blocked = false;
   13531 		} while (blocked && (i++ < 30));
    13532 		return blocked;
   13534 	case WM_T_82571:
   13535 	case WM_T_82572:
   13536 	case WM_T_82573:
   13537 	case WM_T_82574:
   13538 	case WM_T_82583:
   13539 	case WM_T_80003:
   13540 		reg = CSR_READ(sc, WMREG_MANC);
    13541 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   13546 	default:
   13547 		/* no problem */
   13548 		break;
   13549 	}
   13550 
   13551 	return false;
   13552 }
   13553 
   13554 static void
   13555 wm_get_hw_control(struct wm_softc *sc)
   13556 {
   13557 	uint32_t reg;
   13558 
   13559 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13560 		device_xname(sc->sc_dev), __func__));
   13561 
   13562 	if (sc->sc_type == WM_T_82573) {
   13563 		reg = CSR_READ(sc, WMREG_SWSM);
   13564 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13565 	} else if (sc->sc_type >= WM_T_82571) {
   13566 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13567 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13568 	}
   13569 }
   13570 
   13571 static void
   13572 wm_release_hw_control(struct wm_softc *sc)
   13573 {
   13574 	uint32_t reg;
   13575 
   13576 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13577 		device_xname(sc->sc_dev), __func__));
   13578 
   13579 	if (sc->sc_type == WM_T_82573) {
   13580 		reg = CSR_READ(sc, WMREG_SWSM);
   13581 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13582 	} else if (sc->sc_type >= WM_T_82571) {
   13583 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13584 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13585 	}
   13586 }
   13587 
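/*
 * Gate or ungate automatic PHY configuration by hardware.  Applies to
 * PCH2 (82579) and newer only; older types return immediately.
 */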
   13588 static void
   13589 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13590 {
   13591 	uint32_t reg;
   13592 
   13593 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13594 		device_xname(sc->sc_dev), __func__));
   13595 
   13596 	if (sc->sc_type < WM_T_PCH2)
   13597 		return;
   13598 
   13599 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13600 
   13601 	if (gate)
   13602 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13603 	else
   13604 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13605 
   13606 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13607 }
   13608 
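/*
 * Switch the PHY's management interface from SMBus to PCIe MDIO so
 * that its registers become accessible, toggling LANPHYPC and the
 * FORCE_SMBUS bit as needed per device generation.
 */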
   13609 static void
   13610 wm_smbustopci(struct wm_softc *sc)
   13611 {
   13612 	uint32_t fwsm, reg;
   13613 	int rv = 0;
   13614 
   13615 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13616 		device_xname(sc->sc_dev), __func__));
   13617 
   13618 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13619 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13620 
   13621 	/* Disable ULP */
   13622 	wm_ulp_disable(sc);
   13623 
   13624 	/* Acquire PHY semaphore */
   13625 	sc->phy.acquire(sc);
   13626 
   13627 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13628 	switch (sc->sc_type) {
   13629 	case WM_T_PCH_LPT:
   13630 	case WM_T_PCH_SPT:
   13631 	case WM_T_PCH_CNP:
   13632 		if (wm_phy_is_accessible_pchlan(sc))
   13633 			break;
   13634 
   13635 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13636 		reg |= CTRL_EXT_FORCE_SMBUS;
   13637 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13638 #if 0
   13639 		/* XXX Isn't this required??? */
   13640 		CSR_WRITE_FLUSH(sc);
   13641 #endif
   13642 		delay(50 * 1000);
   13643 		/* FALLTHROUGH */
   13644 	case WM_T_PCH2:
   13645 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13646 			break;
   13647 		/* FALLTHROUGH */
   13648 	case WM_T_PCH:
   13649 		if (sc->sc_type == WM_T_PCH)
   13650 			if ((fwsm & FWSM_FW_VALID) != 0)
   13651 				break;
   13652 
   13653 		if (wm_phy_resetisblocked(sc) == true) {
   13654 			printf("XXX reset is blocked(3)\n");
   13655 			break;
   13656 		}
   13657 
   13658 		wm_toggle_lanphypc_pch_lpt(sc);
   13659 
   13660 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13661 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13662 				break;
   13663 
   13664 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13665 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13666 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13667 
   13668 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13669 				break;
   13670 			rv = -1;
   13671 		}
   13672 		break;
   13673 	default:
   13674 		break;
   13675 	}
   13676 
   13677 	/* Release semaphore */
   13678 	sc->phy.release(sc);
   13679 
   13680 	if (rv == 0) {
   13681 		if (wm_phy_resetisblocked(sc)) {
   13682 			printf("XXX reset is blocked(4)\n");
   13683 			goto out;
   13684 		}
   13685 		wm_reset_phy(sc);
   13686 		if (wm_phy_resetisblocked(sc))
   13687 			printf("XXX reset is blocked(4)\n");
   13688 	}
   13689 
   13690 out:
   13691 	/*
   13692 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13693 	 */
   13694 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13695 		delay(10*1000);
   13696 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13697 	}
   13698 }
   13699 
   13700 static void
   13701 wm_init_manageability(struct wm_softc *sc)
   13702 {
   13703 
   13704 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13705 		device_xname(sc->sc_dev), __func__));
   13706 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13707 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13708 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13709 
   13710 		/* Disable hardware interception of ARP */
   13711 		manc &= ~MANC_ARP_EN;
   13712 
   13713 		/* Enable receiving management packets to the host */
   13714 		if (sc->sc_type >= WM_T_82571) {
   13715 			manc |= MANC_EN_MNG2HOST;
   13716 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13717 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13718 		}
   13719 
   13720 		CSR_WRITE(sc, WMREG_MANC, manc);
   13721 	}
   13722 }
   13723 
   13724 static void
   13725 wm_release_manageability(struct wm_softc *sc)
   13726 {
   13727 
   13728 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13729 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13730 
   13731 		manc |= MANC_ARP_EN;
   13732 		if (sc->sc_type >= WM_T_82571)
   13733 			manc &= ~MANC_EN_MNG2HOST;
   13734 
   13735 		CSR_WRITE(sc, WMREG_MANC, manc);
   13736 	}
   13737 }
   13738 
   13739 static void
   13740 wm_get_wakeup(struct wm_softc *sc)
   13741 {
   13742 
   13743 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13744 	switch (sc->sc_type) {
   13745 	case WM_T_82573:
   13746 	case WM_T_82583:
   13747 		sc->sc_flags |= WM_F_HAS_AMT;
   13748 		/* FALLTHROUGH */
   13749 	case WM_T_80003:
   13750 	case WM_T_82575:
   13751 	case WM_T_82576:
   13752 	case WM_T_82580:
   13753 	case WM_T_I350:
   13754 	case WM_T_I354:
   13755 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13756 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13757 		/* FALLTHROUGH */
   13758 	case WM_T_82541:
   13759 	case WM_T_82541_2:
   13760 	case WM_T_82547:
   13761 	case WM_T_82547_2:
   13762 	case WM_T_82571:
   13763 	case WM_T_82572:
   13764 	case WM_T_82574:
   13765 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13766 		break;
   13767 	case WM_T_ICH8:
   13768 	case WM_T_ICH9:
   13769 	case WM_T_ICH10:
   13770 	case WM_T_PCH:
   13771 	case WM_T_PCH2:
   13772 	case WM_T_PCH_LPT:
   13773 	case WM_T_PCH_SPT:
   13774 	case WM_T_PCH_CNP:
   13775 		sc->sc_flags |= WM_F_HAS_AMT;
   13776 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13777 		break;
   13778 	default:
   13779 		break;
   13780 	}
   13781 
   13782 	/* 1: HAS_MANAGE */
   13783 	if (wm_enable_mng_pass_thru(sc) != 0)
   13784 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13785 
   13786 	/*
    13787 	 * Note that the WOL flags are set after the EEPROM settings have
    13788 	 * been reset.
   13789 	 */
   13790 }
   13791 
   13792 /*
   13793  * Unconfigure Ultra Low Power mode.
   13794  * Only for I217 and newer (see below).
   13795  */
   13796 static void
   13797 wm_ulp_disable(struct wm_softc *sc)
   13798 {
   13799 	uint32_t reg;
   13800 	int i = 0;
   13801 
   13802 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13803 		device_xname(sc->sc_dev), __func__));
   13804 	/* Exclude old devices */
   13805 	if ((sc->sc_type < WM_T_PCH_LPT)
   13806 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13807 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13808 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13809 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13810 		return;
   13811 
   13812 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13813 		/* Request ME un-configure ULP mode in the PHY */
   13814 		reg = CSR_READ(sc, WMREG_H2ME);
   13815 		reg &= ~H2ME_ULP;
   13816 		reg |= H2ME_ENFORCE_SETTINGS;
   13817 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13818 
   13819 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13820 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13821 			if (i++ == 30) {
   13822 				printf("%s timed out\n", __func__);
   13823 				return;
   13824 			}
   13825 			delay(10 * 1000);
   13826 		}
   13827 		reg = CSR_READ(sc, WMREG_H2ME);
   13828 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13829 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13830 
   13831 		return;
   13832 	}
   13833 
   13834 	/* Acquire semaphore */
   13835 	sc->phy.acquire(sc);
   13836 
   13837 	/* Toggle LANPHYPC */
   13838 	wm_toggle_lanphypc_pch_lpt(sc);
   13839 
   13840 	/* Unforce SMBus mode in PHY */
   13841 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13842 	if (reg == 0x0000 || reg == 0xffff) {
   13843 		uint32_t reg2;
   13844 
   13845 		printf("%s: Force SMBus first.\n", __func__);
   13846 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13847 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13848 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13849 		delay(50 * 1000);
   13850 
   13851 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13852 	}
   13853 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13854 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13855 
   13856 	/* Unforce SMBus mode in MAC */
   13857 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13858 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13859 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13860 
   13861 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13862 	reg |= HV_PM_CTRL_K1_ENA;
   13863 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13864 
   13865 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13866 	reg &= ~(I218_ULP_CONFIG1_IND
   13867 	    | I218_ULP_CONFIG1_STICKY_ULP
   13868 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13869 	    | I218_ULP_CONFIG1_WOL_HOST
   13870 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13871 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13872 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13873 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13874 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13875 	reg |= I218_ULP_CONFIG1_START;
   13876 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13877 
   13878 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13879 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13880 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13881 
   13882 	/* Release semaphore */
   13883 	sc->phy.release(sc);
   13884 	wm_gmii_reset(sc);
   13885 	delay(50 * 1000);
   13886 }
   13887 
   13888 /* WOL in the newer chipset interfaces (pchlan) */
   13889 static void
   13890 wm_enable_phy_wakeup(struct wm_softc *sc)
   13891 {
   13892 #if 0
   13893 	uint16_t preg;
   13894 
   13895 	/* Copy MAC RARs to PHY RARs */
   13896 
   13897 	/* Copy MAC MTA to PHY MTA */
   13898 
   13899 	/* Configure PHY Rx Control register */
   13900 
   13901 	/* Enable PHY wakeup in MAC register */
   13902 
   13903 	/* Configure and enable PHY wakeup in PHY registers */
   13904 
   13905 	/* Activate PHY wakeup */
   13906 
   13907 	/* XXX */
   13908 #endif
   13909 }
   13910 
   13911 /* Power down workaround on D3 */
   13912 static void
   13913 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13914 {
   13915 	uint32_t reg;
   13916 	int i;
   13917 
   13918 	for (i = 0; i < 2; i++) {
   13919 		/* Disable link */
   13920 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13921 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13922 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13923 
   13924 		/*
   13925 		 * Call gig speed drop workaround on Gig disable before
   13926 		 * accessing any PHY registers
   13927 		 */
   13928 		if (sc->sc_type == WM_T_ICH8)
   13929 			wm_gig_downshift_workaround_ich8lan(sc);
   13930 
   13931 		/* Write VR power-down enable */
   13932 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13933 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13934 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13935 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13936 
   13937 		/* Read it back and test */
   13938 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13939 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13940 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13941 			break;
   13942 
   13943 		/* Issue PHY reset and repeat at most one more time */
   13944 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13945 	}
   13946 }
   13947 
   13948 static void
   13949 wm_enable_wakeup(struct wm_softc *sc)
   13950 {
   13951 	uint32_t reg, pmreg;
   13952 	pcireg_t pmode;
   13953 
   13954 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13955 		device_xname(sc->sc_dev), __func__));
   13956 
   13957 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13958 		&pmreg, NULL) == 0)
   13959 		return;
   13960 
   13961 	/* Advertise the wakeup capability */
   13962 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13963 	    | CTRL_SWDPIN(3));
   13964 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13965 
   13966 	/* ICH workaround */
   13967 	switch (sc->sc_type) {
   13968 	case WM_T_ICH8:
   13969 	case WM_T_ICH9:
   13970 	case WM_T_ICH10:
   13971 	case WM_T_PCH:
   13972 	case WM_T_PCH2:
   13973 	case WM_T_PCH_LPT:
   13974 	case WM_T_PCH_SPT:
   13975 	case WM_T_PCH_CNP:
   13976 		/* Disable gig during WOL */
   13977 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13978 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13979 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13980 		if (sc->sc_type == WM_T_PCH)
   13981 			wm_gmii_reset(sc);
   13982 
   13983 		/* Power down workaround */
   13984 		if (sc->sc_phytype == WMPHY_82577) {
   13985 			struct mii_softc *child;
   13986 
   13987 			/* Assume that the PHY is copper */
   13988 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13989 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13990 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13991 				    (768 << 5) | 25, 0x0444); /* magic num */
   13992 		}
   13993 		break;
   13994 	default:
   13995 		break;
   13996 	}
   13997 
   13998 	/* Keep the laser running on fiber adapters */
   13999 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14000 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14001 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14002 		reg |= CTRL_EXT_SWDPIN(3);
   14003 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14004 	}
   14005 
   14006 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14007 #if 0	/* for the multicast packet */
   14008 	reg |= WUFC_MC;
   14009 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14010 #endif
   14011 
   14012 	if (sc->sc_type >= WM_T_PCH)
   14013 		wm_enable_phy_wakeup(sc);
   14014 	else {
   14015 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14016 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14017 	}
   14018 
   14019 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14020 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14021 		|| (sc->sc_type == WM_T_PCH2))
   14022 		    && (sc->sc_phytype == WMPHY_IGP_3))
   14023 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14024 
   14025 	/* Request PME */
   14026 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14027 #if 0
   14028 	/* Disable WOL */
   14029 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14030 #else
   14031 	/* For WOL */
   14032 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14033 #endif
   14034 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14035 }
   14036 
   14037 /* Disable ASPM L0s and/or L1 for workaround */
   14038 static void
   14039 wm_disable_aspm(struct wm_softc *sc)
   14040 {
   14041 	pcireg_t reg, mask = 0;
    14042 	const char *str = "";
   14043 
   14044 	/*
    14045 	 * Only for PCIe devices which have the PCIe capability in their
    14046 	 * PCI config space.
   14047 	 */
   14048 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14049 		return;
   14050 
   14051 	switch (sc->sc_type) {
   14052 	case WM_T_82571:
   14053 	case WM_T_82572:
   14054 		/*
   14055 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14056 		 * State Power management L1 State (ASPM L1).
   14057 		 */
   14058 		mask = PCIE_LCSR_ASPM_L1;
   14059 		str = "L1 is";
   14060 		break;
   14061 	case WM_T_82573:
   14062 	case WM_T_82574:
   14063 	case WM_T_82583:
   14064 		/*
   14065 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14066 		 *
    14067 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14068 		 * some chipsets.  The 82574 and 82583 documentation says
    14069 		 * that disabling L0s on those specific chipsets is
    14070 		 * sufficient, but we follow what the Intel em driver does.
   14071 		 *
   14072 		 * References:
   14073 		 * Errata 8 of the Specification Update of i82573.
   14074 		 * Errata 20 of the Specification Update of i82574.
   14075 		 * Errata 9 of the Specification Update of i82583.
   14076 		 */
   14077 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14078 		str = "L0s and L1 are";
   14079 		break;
   14080 	default:
   14081 		return;
   14082 	}
   14083 
   14084 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14085 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14086 	reg &= ~mask;
   14087 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14088 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14089 
   14090 	/* Print only in wm_attach() */
   14091 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14092 		aprint_verbose_dev(sc->sc_dev,
   14093 		    "ASPM %s disabled to workaround the errata.\n",
   14094 			str);
   14095 }
   14096 
   14097 /* LPLU */
   14098 
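/*
 * Disable D0 Low Power Link Up.  The location of the LPLU control
 * varies by generation: a PHY register, the PHPM register, PHY_CTRL,
 * or the HV OEM bits.
 */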
   14099 static void
   14100 wm_lplu_d0_disable(struct wm_softc *sc)
   14101 {
   14102 	struct mii_data *mii = &sc->sc_mii;
   14103 	uint32_t reg;
   14104 
   14105 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14106 		device_xname(sc->sc_dev), __func__));
   14107 
   14108 	if (sc->sc_phytype == WMPHY_IFE)
   14109 		return;
   14110 
   14111 	switch (sc->sc_type) {
   14112 	case WM_T_82571:
   14113 	case WM_T_82572:
   14114 	case WM_T_82573:
   14115 	case WM_T_82575:
   14116 	case WM_T_82576:
   14117 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14118 		reg &= ~PMR_D0_LPLU;
   14119 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14120 		break;
   14121 	case WM_T_82580:
   14122 	case WM_T_I350:
   14123 	case WM_T_I210:
   14124 	case WM_T_I211:
   14125 		reg = CSR_READ(sc, WMREG_PHPM);
   14126 		reg &= ~PHPM_D0A_LPLU;
   14127 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14128 		break;
   14129 	case WM_T_82574:
   14130 	case WM_T_82583:
   14131 	case WM_T_ICH8:
   14132 	case WM_T_ICH9:
   14133 	case WM_T_ICH10:
   14134 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14135 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14136 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14137 		CSR_WRITE_FLUSH(sc);
   14138 		break;
   14139 	case WM_T_PCH:
   14140 	case WM_T_PCH2:
   14141 	case WM_T_PCH_LPT:
   14142 	case WM_T_PCH_SPT:
   14143 	case WM_T_PCH_CNP:
   14144 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14145 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14146 		if (wm_phy_resetisblocked(sc) == false)
   14147 			reg |= HV_OEM_BITS_ANEGNOW;
   14148 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14149 		break;
   14150 	default:
   14151 		break;
   14152 	}
   14153 }
   14154 
   14155 /* EEE */
   14156 
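/*
 * Enable or disable Energy Efficient Ethernet advertisement and LPI
 * generation on I350 family devices according to WM_F_EEE.
 */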
   14157 static void
   14158 wm_set_eee_i350(struct wm_softc *sc)
   14159 {
   14160 	uint32_t ipcnfg, eeer;
   14161 
   14162 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14163 	eeer = CSR_READ(sc, WMREG_EEER);
   14164 
   14165 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14166 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14167 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14168 		    | EEER_LPI_FC);
   14169 	} else {
   14170 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14171 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14172 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14173 		    | EEER_LPI_FC);
   14174 	}
   14175 
   14176 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14177 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14178 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14179 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14180 }
   14181 
   14182 /*
   14183  * Workarounds (mainly PHY related).
   14184  * Basically, PHY's workarounds are in the PHY drivers.
   14185  */
   14186 
   14187 /* Work-around for 82566 Kumeran PCS lock loss */
   14188 static void
   14189 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14190 {
   14191 	struct mii_data *mii = &sc->sc_mii;
   14192 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14193 	int i;
   14194 	int reg;
   14195 
   14196 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14197 		device_xname(sc->sc_dev), __func__));
   14198 
   14199 	/* If the link is not up, do nothing */
   14200 	if ((status & STATUS_LU) == 0)
   14201 		return;
   14202 
    14203 	/* Nothing to do if the link speed is other than 1Gbps */
   14204 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14205 		return;
   14206 
   14207 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14208 	for (i = 0; i < 10; i++) {
   14209 		/* read twice */
   14210 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14211 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14212 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14213 			goto out;	/* GOOD! */
   14214 
   14215 		/* Reset the PHY */
   14216 		wm_reset_phy(sc);
   14217 		delay(5*1000);
   14218 	}
   14219 
   14220 	/* Disable GigE link negotiation */
   14221 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14222 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14223 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14224 
   14225 	/*
   14226 	 * Call gig speed drop workaround on Gig disable before accessing
   14227 	 * any PHY registers.
   14228 	 */
   14229 	wm_gig_downshift_workaround_ich8lan(sc);
   14230 
   14231 out:
   14232 	return;
   14233 }
   14234 
   14235 /* WOL from S5 stops working */
   14236 static void
   14237 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14238 {
   14239 	uint16_t kmreg;
   14240 
   14241 	/* Only for igp3 */
   14242 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14243 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14244 			return;
   14245 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14246 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14247 			return;
   14248 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14249 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14250 	}
   14251 }
   14252 
   14253 /*
   14254  * Workaround for pch's PHYs
   14255  * XXX should be moved to new PHY driver?
   14256  */
   14257 static void
   14258 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14259 {
   14260 
   14261 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14262 		device_xname(sc->sc_dev), __func__));
   14263 	KASSERT(sc->sc_type == WM_T_PCH);
   14264 
   14265 	if (sc->sc_phytype == WMPHY_82577)
   14266 		wm_set_mdio_slow_mode_hv(sc);
   14267 
   14268 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14269 
   14270 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14271 
   14272 	/* 82578 */
   14273 	if (sc->sc_phytype == WMPHY_82578) {
   14274 		struct mii_softc *child;
   14275 
   14276 		/*
   14277 		 * Return registers to default by doing a soft reset then
   14278 		 * writing 0x3140 to the control register
   14279 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14280 		 */
   14281 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14282 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14283 			PHY_RESET(child);
   14284 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14285 			    0x3140);
   14286 		}
   14287 	}
   14288 
   14289 	/* Select page 0 */
   14290 	sc->phy.acquire(sc);
   14291 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14292 	sc->phy.release(sc);
   14293 
   14294 	/*
   14295 	 * Configure the K1 Si workaround during phy reset assuming there is
   14296 	 * link so that it disables K1 if link is in 1Gbps.
   14297 	 */
   14298 	wm_k1_gig_workaround_hv(sc, 1);
   14299 }
   14300 
   14301 static void
   14302 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14303 {
   14304 
   14305 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14306 		device_xname(sc->sc_dev), __func__));
   14307 	KASSERT(sc->sc_type == WM_T_PCH2);
   14308 
   14309 	wm_set_mdio_slow_mode_hv(sc);
   14310 }
   14311 
   14312 static int
   14313 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14314 {
   14315 	int k1_enable = sc->sc_nvm_k1_enabled;
   14316 
   14317 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14318 		device_xname(sc->sc_dev), __func__));
   14319 
   14320 	if (sc->phy.acquire(sc) != 0)
   14321 		return -1;
   14322 
   14323 	if (link) {
   14324 		k1_enable = 0;
   14325 
   14326 		/* Link stall fix for link up */
   14327 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14328 		    0x0100);
   14329 	} else {
   14330 		/* Link stall fix for link down */
   14331 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14332 		    0x4100);
   14333 	}
   14334 
   14335 	wm_configure_k1_ich8lan(sc, k1_enable);
   14336 	sc->phy.release(sc);
   14337 
   14338 	return 0;
   14339 }
   14340 
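/* Set the MDIO slow mode bit in the Kumeran mode control register. */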
   14341 static void
   14342 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14343 {
   14344 	uint32_t reg;
   14345 
   14346 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14347 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14348 	    reg | HV_KMRN_MDIO_SLOW);
   14349 }
   14350 
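/*
 * Enable or disable the K1 power state on the Kumeran interface.  The
 * MAC speed setting is briefly forced (CTRL_FRCSPD with the speed
 * bypass bit) so the new K1 configuration takes effect.
 */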
   14351 static void
   14352 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14353 {
   14354 	uint32_t ctrl, ctrl_ext, tmp;
   14355 	uint16_t kmreg;
   14356 	int rv;
   14357 
   14358 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14359 	if (rv != 0)
   14360 		return;
   14361 
   14362 	if (k1_enable)
   14363 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14364 	else
   14365 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14366 
   14367 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14368 	if (rv != 0)
   14369 		return;
   14370 
   14371 	delay(20);
   14372 
   14373 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14374 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14375 
   14376 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14377 	tmp |= CTRL_FRCSPD;
   14378 
   14379 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14380 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14381 	CSR_WRITE_FLUSH(sc);
   14382 	delay(20);
   14383 
   14384 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14385 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14386 	CSR_WRITE_FLUSH(sc);
   14387 	delay(20);
   14388 
   14389 	return;
   14390 }
   14391 
   14392 /* special case - for 82575 - need to do manual init ... */
   14393 static void
   14394 wm_reset_init_script_82575(struct wm_softc *sc)
   14395 {
   14396 	/*
    14397 	 * Remark: this is untested code - we have no board without EEPROM.
    14398 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   14399 	 */
   14400 
   14401 	/* SerDes configuration via SERDESCTRL */
   14402 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14403 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14404 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14405 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14406 
   14407 	/* CCM configuration via CCMCTL register */
   14408 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14409 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14410 
   14411 	/* PCIe lanes configuration */
   14412 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14413 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14414 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14415 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14416 
   14417 	/* PCIe PLL Configuration */
   14418 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14419 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14420 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14421 }
   14422 
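/*
 * Re-derive the MDICNFG destination and combined-MDIO bits from the
 * CFG3 word in the NVM; 82580 SGMII devices only.
 */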
   14423 static void
   14424 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14425 {
   14426 	uint32_t reg;
   14427 	uint16_t nvmword;
   14428 	int rv;
   14429 
   14430 	if (sc->sc_type != WM_T_82580)
   14431 		return;
   14432 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14433 		return;
   14434 
   14435 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14436 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14437 	if (rv != 0) {
   14438 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14439 		    __func__);
   14440 		return;
   14441 	}
   14442 
   14443 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14444 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14445 		reg |= MDICNFG_DEST;
   14446 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14447 		reg |= MDICNFG_COM_MDIO;
   14448 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14449 }
   14450 
   14451 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14452 
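/*
 * Check whether the PHY answers on the MDIO bus by reading its ID
 * registers twice; if that fails on pre-LPT devices, drop to MDIO
 * slow mode and try once more.
 */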
   14453 static bool
   14454 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14455 {
   14456 	int i;
   14457 	uint32_t reg;
   14458 	uint16_t id1, id2;
   14459 
   14460 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14461 		device_xname(sc->sc_dev), __func__));
   14462 	id1 = id2 = 0xffff;
   14463 	for (i = 0; i < 2; i++) {
   14464 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14465 		if (MII_INVALIDID(id1))
   14466 			continue;
   14467 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14468 		if (MII_INVALIDID(id2))
   14469 			continue;
   14470 		break;
   14471 	}
   14472 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   14473 		goto out;
   14474 	}
   14475 
   14476 	if (sc->sc_type < WM_T_PCH_LPT) {
   14477 		sc->phy.release(sc);
   14478 		wm_set_mdio_slow_mode_hv(sc);
   14479 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14480 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14481 		sc->phy.acquire(sc);
   14482 	}
   14483 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14484 		printf("XXX return with false\n");
   14485 		return false;
   14486 	}
   14487 out:
   14488 	if (sc->sc_type >= WM_T_PCH_LPT) {
   14489 		/* Only unforce SMBus if ME is not active */
   14490 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14491 			/* Unforce SMBus mode in PHY */
   14492 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14493 			    CV_SMB_CTRL);
   14494 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14495 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14496 			    CV_SMB_CTRL, reg);
   14497 
   14498 			/* Unforce SMBus mode in MAC */
   14499 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14500 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14501 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14502 		}
   14503 	}
   14504 	return true;
   14505 }
   14506 
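/*
 * Toggle the LANPHYPC value pin to power-cycle the PHY.  On LPT and
 * newer, completion is detected by polling CTRL_EXT_LPCD rather than
 * waiting a fixed 50ms.
 */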
   14507 static void
   14508 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14509 {
   14510 	uint32_t reg;
   14511 	int i;
   14512 
   14513 	/* Set PHY Config Counter to 50msec */
   14514 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14515 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14516 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14517 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14518 
   14519 	/* Toggle LANPHYPC */
   14520 	reg = CSR_READ(sc, WMREG_CTRL);
   14521 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14522 	reg &= ~CTRL_LANPHYPC_VALUE;
   14523 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14524 	CSR_WRITE_FLUSH(sc);
   14525 	delay(1000);
   14526 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14527 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14528 	CSR_WRITE_FLUSH(sc);
   14529 
   14530 	if (sc->sc_type < WM_T_PCH_LPT)
   14531 		delay(50 * 1000);
   14532 	else {
   14533 		i = 20;
   14534 
   14535 		do {
   14536 			delay(5 * 1000);
   14537 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14538 		    && i--);
   14539 
   14540 		delay(30 * 1000);
   14541 	}
   14542 }
   14543 
   14544 static int
   14545 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14546 {
   14547 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14548 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14549 	uint32_t rxa;
   14550 	uint16_t scale = 0, lat_enc = 0;
   14551 	int32_t obff_hwm = 0;
   14552 	int64_t lat_ns, value;
   14553 
   14554 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14555 		device_xname(sc->sc_dev), __func__));
   14556 
   14557 	if (link) {
   14558 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14559 		uint32_t status;
   14560 		uint16_t speed;
   14561 		pcireg_t preg;
   14562 
   14563 		status = CSR_READ(sc, WMREG_STATUS);
   14564 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14565 		case STATUS_SPEED_10:
   14566 			speed = 10;
   14567 			break;
   14568 		case STATUS_SPEED_100:
   14569 			speed = 100;
   14570 			break;
   14571 		case STATUS_SPEED_1000:
   14572 			speed = 1000;
   14573 			break;
   14574 		default:
   14575 			device_printf(sc->sc_dev, "Unknown speed "
   14576 			    "(status = %08x)\n", status);
   14577 			return -1;
   14578 		}
   14579 
   14580 		/* Rx Packet Buffer Allocation size (KB) */
   14581 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14582 
   14583 		/*
   14584 		 * Determine the maximum latency tolerated by the device.
   14585 		 *
   14586 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14587 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14588 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14589 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14590 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14591 		 */
   14592 		lat_ns = ((int64_t)rxa * 1024 -
   14593 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14594 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14595 		if (lat_ns < 0)
   14596 			lat_ns = 0;
   14597 		else
   14598 			lat_ns /= speed;
   14599 		value = lat_ns;
   14600 
   14601 		while (value > LTRV_VALUE) {
    14602 			scale++;
   14603 			value = howmany(value, __BIT(5));
   14604 		}
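		/*
		 * Worked example: lat_ns = 300000 gives value = 293 and
		 * scale = 2 (units of 2^10 ns): howmany(300000, 32) = 9375,
		 * then howmany(9375, 32) = 293, which fits in 10 bits.
		 */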
   14605 		if (scale > LTRV_SCALE_MAX) {
   14606 			printf("%s: Invalid LTR latency scale %d\n",
   14607 			    device_xname(sc->sc_dev), scale);
   14608 			return -1;
   14609 		}
   14610 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14611 
   14612 		/* Determine the maximum latency tolerated by the platform */
   14613 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14614 		    WM_PCI_LTR_CAP_LPT);
   14615 		max_snoop = preg & 0xffff;
   14616 		max_nosnoop = preg >> 16;
   14617 
   14618 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14619 
   14620 		if (lat_enc > max_ltr_enc) {
   14621 			lat_enc = max_ltr_enc;
   14622 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14623 			    * PCI_LTR_SCALETONS(
   14624 				    __SHIFTOUT(lat_enc,
   14625 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14626 		}
   14627 
   14628 		if (lat_ns) {
   14629 			lat_ns *= speed * 1000;
   14630 			lat_ns /= 8;
   14631 			lat_ns /= 1000000000;
   14632 			obff_hwm = (int32_t)(rxa - lat_ns);
   14633 		}
   14634 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    14635 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   14636 			    "(rxa = %d, lat_ns = %d)\n",
   14637 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14638 			return -1;
   14639 		}
   14640 	}
   14641 	/* Snoop and No-Snoop latencies the same */
   14642 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14643 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14644 
   14645 	/* Set OBFF high water mark */
   14646 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14647 	reg |= obff_hwm;
   14648 	CSR_WRITE(sc, WMREG_SVT, reg);
   14649 
   14650 	/* Enable OBFF */
   14651 	reg = CSR_READ(sc, WMREG_SVCR);
   14652 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14653 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14654 
   14655 	return 0;
   14656 }
   14657 
   14658 /*
   14659  * I210 Errata 25 and I211 Errata 10
   14660  * Slow System Clock.
   14661  */
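/*
 * If the internal PHY's PLL reads back as unconfigured, reset the PHY
 * and bounce the device through D3hot/D0 with a patched iNVM autoload
 * word, retrying up to WM_MAX_PLL_TRIES times.
 */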
   14662 static void
   14663 wm_pll_workaround_i210(struct wm_softc *sc)
   14664 {
   14665 	uint32_t mdicnfg, wuc;
   14666 	uint32_t reg;
   14667 	pcireg_t pcireg;
   14668 	uint32_t pmreg;
   14669 	uint16_t nvmword, tmp_nvmword;
   14670 	int phyval;
   14671 	bool wa_done = false;
   14672 	int i;
   14673 
   14674 	/* Save WUC and MDICNFG registers */
   14675 	wuc = CSR_READ(sc, WMREG_WUC);
   14676 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14677 
   14678 	reg = mdicnfg & ~MDICNFG_DEST;
   14679 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14680 
   14681 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14682 		nvmword = INVM_DEFAULT_AL;
   14683 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14684 
   14685 	/* Get Power Management cap offset */
   14686 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14687 		&pmreg, NULL) == 0)
   14688 		return;
   14689 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14690 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14691 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14692 
   14693 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14694 			break; /* OK */
   14695 		}
   14696 
   14697 		wa_done = true;
   14698 		/* Directly reset the internal PHY */
   14699 		reg = CSR_READ(sc, WMREG_CTRL);
   14700 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14701 
   14702 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14703 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14704 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14705 
   14706 		CSR_WRITE(sc, WMREG_WUC, 0);
   14707 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14708 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14709 
   14710 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14711 		    pmreg + PCI_PMCSR);
   14712 		pcireg |= PCI_PMCSR_STATE_D3;
   14713 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14714 		    pmreg + PCI_PMCSR, pcireg);
   14715 		delay(1000);
   14716 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14717 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14718 		    pmreg + PCI_PMCSR, pcireg);
   14719 
   14720 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14721 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14722 
   14723 		/* Restore WUC register */
   14724 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14725 	}
   14726 
   14727 	/* Restore MDICNFG setting */
   14728 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14729 	if (wa_done)
   14730 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14731 }
   14732 
   14733 static void
   14734 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14735 {
   14736 	uint32_t reg;
   14737 
   14738 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14739 		device_xname(sc->sc_dev), __func__));
   14740 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14741 
   14742 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14743 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14744 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14745 
   14746 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14747 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14748 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14749 }
   14750