/*	$NetBSD: if_wm.c,v 1.536 2017/07/28 10:34:58 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Tx multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.536 2017/07/28 10:34:58 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif
/*
 * The maximum number of interrupts this driver can use: one per queue
 * pair plus one for link status.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
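
/*
 * Illustrative sketch (not driver code): because WM_NTXDESC(txq) is a
 * power of two, the ring-advance macros above wrap with a cheap AND of
 * the mask instead of a modulo.  The size 256 below is an assumed
 * example value.
 */
#if 0
	/* With WM_NTXDESC(txq) == 256, index 255 wraps back to 0: */
	int next = WM_NEXTTX(txq, 255);		/* (255 + 1) & 0xff == 0 */
#endif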

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
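
/*
 * Worked figures for the sizing comment above (assuming a ~9k jumbo
 * frame, which is an assumed example size): ceil(9000 / 2048) = 5
 * buffers per jumbo packet, and 256 / 5 = 51 packets, hence the
 * "room for 50 jumbo packets" above.
 */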

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
/*
 * Note: "##" does not paste inside a string literal, so the literal in
 * WM_Q_EVCNT_DEFINE() is used only for its length, which is enough to
 * hold the "%s%02d%s" name built in WM_Q_EVCNT_ATTACH().
 */
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
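
/*
 * Usage sketch for the macros above (illustration only; the queue
 * pointer, queue number and "wm0" xname are assumed example values):
 */
#if 0
	/* txq: struct wm_txqueue *; 2: queue number; "wm0": device xname */
	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 2, "wm0");
	/* ...attaches an EVCNT_TYPE_INTR counter named "txq02txdw" */
	WM_Q_EVCNT_DETACH(txq, txdw, txq, 2);	/* detach at teardown */
#endif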

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */
	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This pcq mediates between them without blocking
	 * (see the usage sketch after this struct).
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};
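
/*
 * Illustrative sketch of the txq_interq hand-off described in struct
 * wm_txqueue (not driver code; see <sys/pcq.h>): producers on any CPU
 * enqueue mbufs with pcq_put() and the queue owner drains them with
 * pcq_get().
 */
#if 0
	/* producer side, callable from any CPU */
	if (!pcq_put(txq->txq_interq, m)) {
		m_freem(m);			/* queue full: drop */
		return ENOBUFS;
	}
	/* consumer side, run under txq_lock */
	while ((m = pcq_get(txq->txq_interq)) != NULL) {
		/* enqueue m onto the hardware descriptor ring */
	}
#endif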

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only
					 * MSI-X uses sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
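
/*
 * Usage sketch (illustration only): the macros above take the core
 * lock only when it exists, so non-MPSAFE configurations fall through
 * without locking.
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify softc state ... */
	WM_CORE_UNLOCK(sc);
#endif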

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
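
/*
 * Sketch of the tail-pointer chain the two macros above maintain
 * (illustration only): rxq_tailp always points at the m_next slot
 * where the next fragment will be linked, making appends O(1).
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* head = NULL, tailp = &head */
	WM_RXCHAIN_LINK(rxq, m0);	/* head = m0, tailp = &m0->m_next */
	WM_RXCHAIN_LINK(rxq, m1);	/* m0->m_next = m1, tailp = &m1->m_next */
#endif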

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
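
/*
 * Typical use of CSR_WRITE_FLUSH() (sketch only; register and bit
 * names per if_wmreg.h): PCI writes are posted, so a read of STATUS
 * forces the preceding write to reach the device before a delay is
 * timed.
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);		/* push the posted write out */
	delay(10000);			/* now the delay is meaningful */
#endif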

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
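
/*
 * Sketch of how the _LO/_HI pairs above are consumed (illustration
 * only): the hardware takes descriptor-ring base addresses as two
 * 32-bit halves; on platforms with a 32-bit bus_addr_t the high half
 * is simply 0.
 */
#if 0
	uint32_t lo = WM_CDTXADDR_LO(txq, 0);	/* ring base, low 32 bits */
	uint32_t hi = WM_CDTXADDR_HI(txq, 0);	/* 0 on 32-bit bus_addr_t */
	/* ... program the Tx base-address registers with hi/lo ... */
#endif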

/*
 * Register read/write functions other than CSR_{READ,WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1332 	  "82576 gigabit Ethernet (SERDES)",
   1333 	  WM_T_82576,		WMP_F_SERDES },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1335 	  "82576 quad-gigabit Ethernet (SERDES)",
   1336 	  WM_T_82576,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1339 	  "82580 1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1342 	  "82580 1000BaseX Ethernet",
   1343 	  WM_T_82580,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1346 	  "82580 1000BaseT Ethernet (SERDES)",
   1347 	  WM_T_82580,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1350 	  "82580 gigabit Ethernet (SGMII)",
   1351 	  WM_T_82580,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1353 	  "82580 dual-1000BaseT Ethernet",
   1354 	  WM_T_82580,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1357 	  "82580 quad-1000BaseX Ethernet",
   1358 	  WM_T_82580,		WMP_F_FIBER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1361 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1362 	  WM_T_82580,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1365 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1366 	  WM_T_82580,		WMP_F_SERDES },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1369 	  "DH89XXCC 1000BASE-KX Ethernet",
   1370 	  WM_T_82580,		WMP_F_SERDES },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1373 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1374 	  WM_T_82580,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1377 	  "I350 Gigabit Network Connection",
   1378 	  WM_T_I350,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1381 	  "I350 Gigabit Fiber Network Connection",
   1382 	  WM_T_I350,		WMP_F_FIBER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1385 	  "I350 Gigabit Backplane Connection",
   1386 	  WM_T_I350,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1389 	  "I350 Quad Port Gigabit Ethernet",
   1390 	  WM_T_I350,		WMP_F_SERDES },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1393 	  "I350 Gigabit Connection",
   1394 	  WM_T_I350,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1397 	  "I354 Gigabit Ethernet (KX)",
   1398 	  WM_T_I354,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1401 	  "I354 Gigabit Ethernet (SGMII)",
   1402 	  WM_T_I354,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1405 	  "I354 Gigabit Ethernet (2.5G)",
   1406 	  WM_T_I354,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1409 	  "I210-T1 Ethernet Server Adapter",
   1410 	  WM_T_I210,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1413 	  "I210 Ethernet (Copper OEM)",
   1414 	  WM_T_I210,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1417 	  "I210 Ethernet (Copper IT)",
   1418 	  WM_T_I210,		WMP_F_COPPER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1421 	  "I210 Ethernet (FLASH less)",
   1422 	  WM_T_I210,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1425 	  "I210 Gigabit Ethernet (Fiber)",
   1426 	  WM_T_I210,		WMP_F_FIBER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1429 	  "I210 Gigabit Ethernet (SERDES)",
   1430 	  WM_T_I210,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1433 	  "I210 Gigabit Ethernet (FLASH less)",
   1434 	  WM_T_I210,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1437 	  "I210 Gigabit Ethernet (SGMII)",
   1438 	  WM_T_I210,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1441 	  "I211 Ethernet (COPPER)",
   1442 	  WM_T_I211,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1444 	  "I217 V Ethernet Connection",
   1445 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1447 	  "I217 LM Ethernet Connection",
   1448 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1450 	  "I218 V Ethernet Connection",
   1451 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1453 	  "I218 V Ethernet Connection",
   1454 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1456 	  "I218 V Ethernet Connection",
   1457 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1459 	  "I218 LM Ethernet Connection",
   1460 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1462 	  "I218 LM Ethernet Connection",
   1463 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1465 	  "I218 LM Ethernet Connection",
   1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1467 #if 0
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1469 	  "I219 V Ethernet Connection",
   1470 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1472 	  "I219 V Ethernet Connection",
   1473 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1475 	  "I219 V Ethernet Connection",
   1476 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1478 	  "I219 V Ethernet Connection",
   1479 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1481 	  "I219 LM Ethernet Connection",
   1482 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1484 	  "I219 LM Ethernet Connection",
   1485 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1487 	  "I219 LM Ethernet Connection",
   1488 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1490 	  "I219 LM Ethernet Connection",
   1491 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1493 	  "I219 LM Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 #endif
   1496 	{ 0,			0,
   1497 	  NULL,
   1498 	  0,			0 },
   1499 };
   1500 
   1501 /*
   1502  * Register read/write functions.
   1503  * Other than CSR_{READ|WRITE}().
   1504  */
   1505 
   1506 #if 0 /* Not currently used */
   1507 static inline uint32_t
   1508 wm_io_read(struct wm_softc *sc, int reg)
   1509 {
   1510 
   1511 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1512 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1513 }
   1514 #endif
   1515 
   1516 static inline void
   1517 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1518 {
   1519 
   1520 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1521 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1522 }
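
         /*
          * These helpers drive the indirect window exposed by the I/O
          * BAR: a 32-bit write at offset 0 selects the target register
          * and offset 4 then carries that register's data, which is why
          * each access is a pair of bus_space operations.  The effect
          * matches the memory-mapped CSR_READ()/CSR_WRITE(), just routed
          * through I/O space, e.g. for chip-bug workarounds.
          */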
   1523 
   1524 static inline void
   1525 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1526     uint32_t data)
   1527 {
   1528 	uint32_t regval;
   1529 	int i;
   1530 
   1531 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1532 
   1533 	CSR_WRITE(sc, reg, regval);
   1534 
   1535 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1536 		delay(5);
   1537 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1538 			break;
   1539 	}
   1540 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1541 		aprint_error("%s: WARNING:"
   1542 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1543 		    device_xname(sc->sc_dev), reg);
   1544 	}
   1545 }
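
         /*
          * The poll in wm_82575_write_8bit_ctlr_reg() budgets
          * SCTL_CTL_POLL_TIMEOUT iterations of delay(5) each, so the
          * total wait is bounded at roughly 5us * SCTL_CTL_POLL_TIMEOUT.
          * On timeout the write may simply have been dropped by the
          * controller; only a warning is printed and the call does not
          * fail.
          */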
   1546 
   1547 static inline void
   1548 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1549 {
   1550 	wa->wa_low = htole32(v & 0xffffffffU);
   1551 	if (sizeof(bus_addr_t) == 8)
   1552 		wa->wa_high = htole32((uint64_t) v >> 32);
   1553 	else
   1554 		wa->wa_high = 0;
   1555 }
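
         /*
          * wm_set_dma_addr() splits a bus address into the two
          * little-endian 32-bit words the descriptor layout expects.
          * The sizeof(bus_addr_t) test is a compile-time constant, so
          * on platforms with a 32-bit bus_addr_t the high-word branch
          * (and its 64-bit shift) can be folded away by the compiler.
          */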
   1556 
   1557 /*
   1558  * Descriptor sync/init functions.
   1559  */
   1560 static inline void
   1561 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1562 {
   1563 	struct wm_softc *sc = txq->txq_sc;
   1564 
   1565 	/* If it will wrap around, sync to the end of the ring. */
   1566 	if ((start + num) > WM_NTXDESC(txq)) {
   1567 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1568 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1569 		    (WM_NTXDESC(txq) - start), ops);
   1570 		num -= (WM_NTXDESC(txq) - start);
   1571 		start = 0;
   1572 	}
   1573 
   1574 	/* Now sync whatever is left. */
   1575 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1576 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1577 }
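
         /*
          * Worked example for the wrap handling above: with
          * WM_NTXDESC(txq) == 256, a call with start == 252 and num == 8
          * first syncs descriptors 252..255 at the end of the ring, then
          * wraps and syncs 0..3, so one logical range becomes two
          * contiguous bus_dmamap_sync() operations.
          */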
   1578 
   1579 static inline void
   1580 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1581 {
   1582 	struct wm_softc *sc = rxq->rxq_sc;
   1583 
   1584 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1585 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1586 }
   1587 
   1588 static inline void
   1589 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1590 {
   1591 	struct wm_softc *sc = rxq->rxq_sc;
   1592 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1593 	struct mbuf *m = rxs->rxs_mbuf;
   1594 
   1595 	/*
   1596 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1597 	 * so that the payload after the Ethernet header is aligned
   1598 	 * to a 4-byte boundary.
    1599 	 *
   1600 	 * XXX BRAINDAMAGE ALERT!
   1601 	 * The stupid chip uses the same size for every buffer, which
   1602 	 * is set in the Receive Control register.  We are using the 2K
   1603 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1604 	 * reason, we can't "scoot" packets longer than the standard
   1605 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1606 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1607 	 * the upper layer copy the headers.
   1608 	 */
   1609 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1610 
   1611 	if (sc->sc_type == WM_T_82574) {
   1612 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1613 		rxd->erx_data.erxd_addr =
   1614 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1615 		rxd->erx_data.erxd_dd = 0;
   1616 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1617 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1618 
   1619 		rxd->nqrx_data.nrxd_paddr =
   1620 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1621 		/* Currently, split header is not supported. */
   1622 		rxd->nqrx_data.nrxd_haddr = 0;
   1623 	} else {
   1624 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1625 
   1626 		wm_set_dma_addr(&rxd->wrx_addr,
   1627 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1628 		rxd->wrx_len = 0;
   1629 		rxd->wrx_cksum = 0;
   1630 		rxd->wrx_status = 0;
   1631 		rxd->wrx_errors = 0;
   1632 		rxd->wrx_special = 0;
   1633 	}
   1634 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1635 
   1636 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1637 }
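
         /*
          * Note that the write to the queue's RDT (receive descriptor
          * tail) register at the end of wm_init_rxdesc() is what hands
          * the descriptor back to the hardware, which is why the
          * wm_cdrxsync() with PREREAD|PREWRITE must come first.
          */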
   1638 
   1639 /*
   1640  * Device driver interface functions and commonly used functions.
   1641  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1642  */
   1643 
   1644 /* Lookup supported device table */
   1645 static const struct wm_product *
   1646 wm_lookup(const struct pci_attach_args *pa)
   1647 {
   1648 	const struct wm_product *wmp;
   1649 
   1650 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1651 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1652 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1653 			return wmp;
   1654 	}
   1655 	return NULL;
   1656 }
   1657 
   1658 /* The match function (ca_match) */
   1659 static int
   1660 wm_match(device_t parent, cfdata_t cf, void *aux)
   1661 {
   1662 	struct pci_attach_args *pa = aux;
   1663 
   1664 	if (wm_lookup(pa) != NULL)
   1665 		return 1;
   1666 
   1667 	return 0;
   1668 }
   1669 
   1670 /* The attach function (ca_attach) */
   1671 static void
   1672 wm_attach(device_t parent, device_t self, void *aux)
   1673 {
   1674 	struct wm_softc *sc = device_private(self);
   1675 	struct pci_attach_args *pa = aux;
   1676 	prop_dictionary_t dict;
   1677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1678 	pci_chipset_tag_t pc = pa->pa_pc;
   1679 	int counts[PCI_INTR_TYPE_SIZE];
   1680 	pci_intr_type_t max_type;
   1681 	const char *eetype, *xname;
   1682 	bus_space_tag_t memt;
   1683 	bus_space_handle_t memh;
   1684 	bus_size_t memsize;
   1685 	int memh_valid;
   1686 	int i, error;
   1687 	const struct wm_product *wmp;
   1688 	prop_data_t ea;
   1689 	prop_number_t pn;
   1690 	uint8_t enaddr[ETHER_ADDR_LEN];
   1691 	char buf[256];
   1692 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1693 	pcireg_t preg, memtype;
   1694 	uint16_t eeprom_data, apme_mask;
   1695 	bool force_clear_smbi;
   1696 	uint32_t link_mode;
   1697 	uint32_t reg;
   1698 
   1699 	sc->sc_dev = self;
   1700 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1701 	sc->sc_core_stopping = false;
   1702 
   1703 	wmp = wm_lookup(pa);
   1704 #ifdef DIAGNOSTIC
   1705 	if (wmp == NULL) {
   1706 		printf("\n");
   1707 		panic("wm_attach: impossible");
   1708 	}
   1709 #endif
   1710 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1711 
   1712 	sc->sc_pc = pa->pa_pc;
   1713 	sc->sc_pcitag = pa->pa_tag;
   1714 
   1715 	if (pci_dma64_available(pa))
   1716 		sc->sc_dmat = pa->pa_dmat64;
   1717 	else
   1718 		sc->sc_dmat = pa->pa_dmat;
   1719 
   1720 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
    1721 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1722 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1723 
   1724 	sc->sc_type = wmp->wmp_type;
   1725 
   1726 	/* Set default function pointers */
   1727 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1728 	sc->phy.release = sc->nvm.release = wm_put_null;
   1729 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1730 
   1731 	if (sc->sc_type < WM_T_82543) {
   1732 		if (sc->sc_rev < 2) {
   1733 			aprint_error_dev(sc->sc_dev,
   1734 			    "i82542 must be at least rev. 2\n");
   1735 			return;
   1736 		}
   1737 		if (sc->sc_rev < 3)
   1738 			sc->sc_type = WM_T_82542_2_0;
   1739 	}
   1740 
   1741 	/*
   1742 	 * Disable MSI for Errata:
   1743 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1744 	 *
   1745 	 *  82544: Errata 25
   1746 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1747 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1748 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1749 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1750 	 *
   1751 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1752 	 *
   1753 	 *  82571 & 82572: Errata 63
   1754 	 */
   1755 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1756 	    || (sc->sc_type == WM_T_82572))
   1757 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1758 
   1759 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1760 	    || (sc->sc_type == WM_T_82580)
   1761 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1762 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1763 		sc->sc_flags |= WM_F_NEWQUEUE;
   1764 
   1765 	/* Set device properties (mactype) */
   1766 	dict = device_properties(sc->sc_dev);
   1767 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1768 
   1769 	/*
    1770 	 * Map the device.  All devices support memory-mapped access,
   1771 	 * and it is really required for normal operation.
   1772 	 */
   1773 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1774 	switch (memtype) {
   1775 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1776 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1777 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1778 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1779 		break;
   1780 	default:
   1781 		memh_valid = 0;
   1782 		break;
   1783 	}
   1784 
   1785 	if (memh_valid) {
   1786 		sc->sc_st = memt;
   1787 		sc->sc_sh = memh;
   1788 		sc->sc_ss = memsize;
   1789 	} else {
   1790 		aprint_error_dev(sc->sc_dev,
   1791 		    "unable to map device registers\n");
   1792 		return;
   1793 	}
   1794 
   1795 	/*
   1796 	 * In addition, i82544 and later support I/O mapped indirect
   1797 	 * register access.  It is not desirable (nor supported in
   1798 	 * this driver) to use it for normal operation, though it is
   1799 	 * required to work around bugs in some chip versions.
   1800 	 */
   1801 	if (sc->sc_type >= WM_T_82544) {
   1802 		/* First we have to find the I/O BAR. */
   1803 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1804 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1805 			if (memtype == PCI_MAPREG_TYPE_IO)
   1806 				break;
   1807 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1808 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1809 				i += 4;	/* skip high bits, too */
   1810 		}
   1811 		if (i < PCI_MAPREG_END) {
   1812 			/*
   1813 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
    1814 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1815 			 * That is no problem, because newer chips do not
    1816 			 * have this bug.
   1817 			 *
   1818 			 * The i8254x doesn't apparently respond when the
   1819 			 * I/O BAR is 0, which looks somewhat like it's not
   1820 			 * been configured.
   1821 			 */
   1822 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1823 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1824 				aprint_error_dev(sc->sc_dev,
   1825 				    "WARNING: I/O BAR at zero.\n");
   1826 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1827 					0, &sc->sc_iot, &sc->sc_ioh,
   1828 					NULL, &sc->sc_ios) == 0) {
   1829 				sc->sc_flags |= WM_F_IOH_VALID;
   1830 			} else {
   1831 				aprint_error_dev(sc->sc_dev,
   1832 				    "WARNING: unable to map I/O space\n");
   1833 			}
   1834 		}
   1835 
   1836 	}
   1837 
   1838 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1839 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1840 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1841 	if (sc->sc_type < WM_T_82542_2_1)
   1842 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1843 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1844 
   1845 	/* power up chip */
   1846 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1847 	    NULL)) && error != EOPNOTSUPP) {
   1848 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1849 		return;
   1850 	}
   1851 
   1852 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1853 
   1854 	/* Allocation settings */
   1855 	max_type = PCI_INTR_TYPE_MSIX;
   1856 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1857 	counts[PCI_INTR_TYPE_MSI] = 1;
   1858 	counts[PCI_INTR_TYPE_INTX] = 1;
   1859 	/* overridden by disable flags */
   1860 	if (wm_disable_msi != 0) {
   1861 		counts[PCI_INTR_TYPE_MSI] = 0;
   1862 		if (wm_disable_msix != 0) {
   1863 			max_type = PCI_INTR_TYPE_INTX;
   1864 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1865 		}
   1866 	} else if (wm_disable_msix != 0) {
   1867 		max_type = PCI_INTR_TYPE_MSI;
   1868 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1869 	}
   1870 
   1871 alloc_retry:
   1872 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1873 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1874 		return;
   1875 	}
   1876 
   1877 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1878 		error = wm_setup_msix(sc);
   1879 		if (error) {
   1880 			pci_intr_release(pc, sc->sc_intrs,
   1881 			    counts[PCI_INTR_TYPE_MSIX]);
   1882 
   1883 			/* Setup for MSI: Disable MSI-X */
   1884 			max_type = PCI_INTR_TYPE_MSI;
   1885 			counts[PCI_INTR_TYPE_MSI] = 1;
   1886 			counts[PCI_INTR_TYPE_INTX] = 1;
   1887 			goto alloc_retry;
   1888 		}
    1889 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1890 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1891 		error = wm_setup_legacy(sc);
   1892 		if (error) {
   1893 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1894 			    counts[PCI_INTR_TYPE_MSI]);
   1895 
   1896 			/* The next try is for INTx: Disable MSI */
   1897 			max_type = PCI_INTR_TYPE_INTX;
   1898 			counts[PCI_INTR_TYPE_INTX] = 1;
   1899 			goto alloc_retry;
   1900 		}
   1901 	} else {
   1902 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1903 		error = wm_setup_legacy(sc);
   1904 		if (error) {
   1905 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1906 			    counts[PCI_INTR_TYPE_INTX]);
   1907 			return;
   1908 		}
   1909 	}
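
         	/*
         	 * To summarize the retry logic above: allocation prefers
         	 * MSI-X, then MSI, then INTx.  If pci_intr_alloc() grants
         	 * MSI-X but wm_setup_msix() fails, the vectors are released
         	 * and the allocation is retried with MSI as the maximum
         	 * type; a failed MSI setup likewise falls back to INTx, and
         	 * only an INTx setup failure aborts the attach.
         	 */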
   1910 
   1911 	/*
   1912 	 * Check the function ID (unit number of the chip).
   1913 	 */
   1914 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1915 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1916 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1917 	    || (sc->sc_type == WM_T_82580)
   1918 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1919 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1920 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1921 	else
   1922 		sc->sc_funcid = 0;
   1923 
   1924 	/*
   1925 	 * Determine a few things about the bus we're connected to.
   1926 	 */
   1927 	if (sc->sc_type < WM_T_82543) {
   1928 		/* We don't really know the bus characteristics here. */
   1929 		sc->sc_bus_speed = 33;
   1930 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1931 		/*
   1932 		 * CSA (Communication Streaming Architecture) is about as fast
    1933 		 * as a 32-bit 66MHz PCI bus.
   1934 		 */
   1935 		sc->sc_flags |= WM_F_CSA;
   1936 		sc->sc_bus_speed = 66;
   1937 		aprint_verbose_dev(sc->sc_dev,
   1938 		    "Communication Streaming Architecture\n");
   1939 		if (sc->sc_type == WM_T_82547) {
   1940 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1941 			callout_setfunc(&sc->sc_txfifo_ch,
   1942 					wm_82547_txfifo_stall, sc);
   1943 			aprint_verbose_dev(sc->sc_dev,
   1944 			    "using 82547 Tx FIFO stall work-around\n");
   1945 		}
   1946 	} else if (sc->sc_type >= WM_T_82571) {
   1947 		sc->sc_flags |= WM_F_PCIE;
   1948 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1949 		    && (sc->sc_type != WM_T_ICH10)
   1950 		    && (sc->sc_type != WM_T_PCH)
   1951 		    && (sc->sc_type != WM_T_PCH2)
   1952 		    && (sc->sc_type != WM_T_PCH_LPT)
   1953 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1954 			/* ICH* and PCH* have no PCIe capability registers */
   1955 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1956 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1957 				NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIe capability\n");
   1960 		}
   1961 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1962 	} else {
   1963 		reg = CSR_READ(sc, WMREG_STATUS);
   1964 		if (reg & STATUS_BUS64)
   1965 			sc->sc_flags |= WM_F_BUS64;
   1966 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1967 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1968 
   1969 			sc->sc_flags |= WM_F_PCIX;
   1970 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1971 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1972 				aprint_error_dev(sc->sc_dev,
   1973 				    "unable to find PCIX capability\n");
   1974 			else if (sc->sc_type != WM_T_82545_3 &&
   1975 				 sc->sc_type != WM_T_82546_3) {
   1976 				/*
   1977 				 * Work around a problem caused by the BIOS
   1978 				 * setting the max memory read byte count
   1979 				 * incorrectly.
   1980 				 */
   1981 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1982 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1983 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1984 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1985 
   1986 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1987 				    PCIX_CMD_BYTECNT_SHIFT;
   1988 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1989 				    PCIX_STATUS_MAXB_SHIFT;
   1990 				if (bytecnt > maxb) {
   1991 					aprint_verbose_dev(sc->sc_dev,
   1992 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1993 					    512 << bytecnt, 512 << maxb);
   1994 					pcix_cmd = (pcix_cmd &
   1995 					    ~PCIX_CMD_BYTECNT_MASK) |
   1996 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1997 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1998 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1999 					    pcix_cmd);
   2000 				}
   2001 			}
   2002 		}
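         		/*
         		 * MMRBC encoding note: both fields compared above hold
         		 * a small exponent n meaning 512 << n bytes, i.e. 0..3
         		 * map to 512, 1024, 2048 and 4096.  If, for example,
         		 * the BIOS programmed 4096 (3) while the device only
         		 * advertises 2048 (2), PCIX_CMD is rewritten down to
         		 * the advertised maximum.
         		 */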
   2003 		/*
   2004 		 * The quad port adapter is special; it has a PCIX-PCIX
   2005 		 * bridge on the board, and can run the secondary bus at
   2006 		 * a higher speed.
   2007 		 */
   2008 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2009 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2010 								      : 66;
   2011 		} else if (sc->sc_flags & WM_F_PCIX) {
   2012 			switch (reg & STATUS_PCIXSPD_MASK) {
   2013 			case STATUS_PCIXSPD_50_66:
   2014 				sc->sc_bus_speed = 66;
   2015 				break;
   2016 			case STATUS_PCIXSPD_66_100:
   2017 				sc->sc_bus_speed = 100;
   2018 				break;
   2019 			case STATUS_PCIXSPD_100_133:
   2020 				sc->sc_bus_speed = 133;
   2021 				break;
   2022 			default:
   2023 				aprint_error_dev(sc->sc_dev,
   2024 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2025 				    reg & STATUS_PCIXSPD_MASK);
   2026 				sc->sc_bus_speed = 66;
   2027 				break;
   2028 			}
   2029 		} else
   2030 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2031 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2032 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2033 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2034 	}
   2035 
   2036 	/* clear interesting stat counters */
   2037 	CSR_READ(sc, WMREG_COLC);
   2038 	CSR_READ(sc, WMREG_RXERRC);
   2039 
   2040 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2041 	    || (sc->sc_type >= WM_T_ICH8))
   2042 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2043 	if (sc->sc_type >= WM_T_ICH8)
   2044 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2045 
   2046 	/* Set PHY, NVM mutex related stuff */
   2047 	switch (sc->sc_type) {
   2048 	case WM_T_82542_2_0:
   2049 	case WM_T_82542_2_1:
   2050 	case WM_T_82543:
   2051 	case WM_T_82544:
   2052 		/* Microwire */
   2053 		sc->nvm.read = wm_nvm_read_uwire;
   2054 		sc->sc_nvm_wordsize = 64;
   2055 		sc->sc_nvm_addrbits = 6;
   2056 		break;
   2057 	case WM_T_82540:
   2058 	case WM_T_82545:
   2059 	case WM_T_82545_3:
   2060 	case WM_T_82546:
   2061 	case WM_T_82546_3:
   2062 		/* Microwire */
   2063 		sc->nvm.read = wm_nvm_read_uwire;
   2064 		reg = CSR_READ(sc, WMREG_EECD);
   2065 		if (reg & EECD_EE_SIZE) {
   2066 			sc->sc_nvm_wordsize = 256;
   2067 			sc->sc_nvm_addrbits = 8;
   2068 		} else {
   2069 			sc->sc_nvm_wordsize = 64;
   2070 			sc->sc_nvm_addrbits = 6;
   2071 		}
   2072 		sc->sc_flags |= WM_F_LOCK_EECD;
   2073 		sc->nvm.acquire = wm_get_eecd;
   2074 		sc->nvm.release = wm_put_eecd;
   2075 		break;
   2076 	case WM_T_82541:
   2077 	case WM_T_82541_2:
   2078 	case WM_T_82547:
   2079 	case WM_T_82547_2:
   2080 		reg = CSR_READ(sc, WMREG_EECD);
   2081 		/*
    2082 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI device only
    2083 		 * on 8254[17], so set the flags and functions before calling it.
   2084 		 */
   2085 		sc->sc_flags |= WM_F_LOCK_EECD;
   2086 		sc->nvm.acquire = wm_get_eecd;
   2087 		sc->nvm.release = wm_put_eecd;
   2088 		if (reg & EECD_EE_TYPE) {
   2089 			/* SPI */
   2090 			sc->nvm.read = wm_nvm_read_spi;
   2091 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2092 			wm_nvm_set_addrbits_size_eecd(sc);
   2093 		} else {
   2094 			/* Microwire */
   2095 			sc->nvm.read = wm_nvm_read_uwire;
   2096 			if ((reg & EECD_EE_ABITS) != 0) {
   2097 				sc->sc_nvm_wordsize = 256;
   2098 				sc->sc_nvm_addrbits = 8;
   2099 			} else {
   2100 				sc->sc_nvm_wordsize = 64;
   2101 				sc->sc_nvm_addrbits = 6;
   2102 			}
   2103 		}
   2104 		break;
   2105 	case WM_T_82571:
   2106 	case WM_T_82572:
   2107 		/* SPI */
   2108 		sc->nvm.read = wm_nvm_read_eerd;
    2109 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2110 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2111 		wm_nvm_set_addrbits_size_eecd(sc);
   2112 		sc->phy.acquire = wm_get_swsm_semaphore;
   2113 		sc->phy.release = wm_put_swsm_semaphore;
   2114 		sc->nvm.acquire = wm_get_nvm_82571;
   2115 		sc->nvm.release = wm_put_nvm_82571;
   2116 		break;
   2117 	case WM_T_82573:
   2118 	case WM_T_82574:
   2119 	case WM_T_82583:
   2120 		sc->nvm.read = wm_nvm_read_eerd;
    2121 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2122 		if (sc->sc_type == WM_T_82573) {
   2123 			sc->phy.acquire = wm_get_swsm_semaphore;
   2124 			sc->phy.release = wm_put_swsm_semaphore;
   2125 			sc->nvm.acquire = wm_get_nvm_82571;
   2126 			sc->nvm.release = wm_put_nvm_82571;
   2127 		} else {
   2128 			/* Both PHY and NVM use the same semaphore. */
   2129 			sc->phy.acquire = sc->nvm.acquire
   2130 			    = wm_get_swfwhw_semaphore;
   2131 			sc->phy.release = sc->nvm.release
   2132 			    = wm_put_swfwhw_semaphore;
   2133 		}
   2134 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2135 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2136 			sc->sc_nvm_wordsize = 2048;
   2137 		} else {
   2138 			/* SPI */
   2139 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2140 			wm_nvm_set_addrbits_size_eecd(sc);
   2141 		}
   2142 		break;
   2143 	case WM_T_82575:
   2144 	case WM_T_82576:
   2145 	case WM_T_82580:
   2146 	case WM_T_I350:
   2147 	case WM_T_I354:
   2148 	case WM_T_80003:
   2149 		/* SPI */
   2150 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2151 		wm_nvm_set_addrbits_size_eecd(sc);
    2152 		if ((sc->sc_type == WM_T_80003)
   2153 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2154 			sc->nvm.read = wm_nvm_read_eerd;
   2155 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2156 		} else {
   2157 			sc->nvm.read = wm_nvm_read_spi;
   2158 			sc->sc_flags |= WM_F_LOCK_EECD;
   2159 		}
   2160 		sc->phy.acquire = wm_get_phy_82575;
   2161 		sc->phy.release = wm_put_phy_82575;
   2162 		sc->nvm.acquire = wm_get_nvm_80003;
   2163 		sc->nvm.release = wm_put_nvm_80003;
   2164 		break;
   2165 	case WM_T_ICH8:
   2166 	case WM_T_ICH9:
   2167 	case WM_T_ICH10:
   2168 	case WM_T_PCH:
   2169 	case WM_T_PCH2:
   2170 	case WM_T_PCH_LPT:
   2171 		sc->nvm.read = wm_nvm_read_ich8;
   2172 		/* FLASH */
   2173 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2174 		sc->sc_nvm_wordsize = 2048;
    2175 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   2176 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2177 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2178 			aprint_error_dev(sc->sc_dev,
   2179 			    "can't map FLASH registers\n");
   2180 			goto out;
   2181 		}
   2182 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2183 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2184 		    ICH_FLASH_SECTOR_SIZE;
   2185 		sc->sc_ich8_flash_bank_size =
   2186 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2187 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2188 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2189 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
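         		/*
         		 * GFPREG arithmetic above: the low bits hold the first
         		 * flash sector of the NVM region and the field at bit
         		 * 16 the last one, in ICH_FLASH_SECTOR_SIZE units.
         		 * (limit + 1 - base) sectors are converted to bytes,
         		 * then divided by 2 * sizeof(uint16_t): once by two for
         		 * the two flash banks and once by two to count 16-bit
         		 * words instead of bytes.
         		 */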
   2190 		sc->sc_flashreg_offset = 0;
   2191 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2192 		sc->phy.release = wm_put_swflag_ich8lan;
   2193 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2194 		sc->nvm.release = wm_put_nvm_ich8lan;
   2195 		break;
   2196 	case WM_T_PCH_SPT:
   2197 		sc->nvm.read = wm_nvm_read_spt;
   2198 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2199 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2200 		sc->sc_flasht = sc->sc_st;
   2201 		sc->sc_flashh = sc->sc_sh;
   2202 		sc->sc_ich8_flash_base = 0;
   2203 		sc->sc_nvm_wordsize =
   2204 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2205 			* NVM_SIZE_MULTIPLIER;
    2206 		/* It is a size in bytes; we want words */
   2207 		sc->sc_nvm_wordsize /= 2;
   2208 		/* assume 2 banks */
   2209 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
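         		/*
         		 * Illustrative numbers (the 4KB multiplier is an
         		 * assumption, not taken from this file): a strap field
         		 * of 0x0f yields (15 + 1) * NVM_SIZE_MULTIPLIER bytes;
         		 * with a 4KB multiplier that is 64KB of NVM, i.e. 32K
         		 * 16-bit words, split above into two assumed banks of
         		 * 16K words each.
         		 */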
   2210 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2211 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2212 		sc->phy.release = wm_put_swflag_ich8lan;
   2213 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2214 		sc->nvm.release = wm_put_nvm_ich8lan;
   2215 		break;
   2216 	case WM_T_I210:
   2217 	case WM_T_I211:
    2218 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2219 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2220 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2221 			sc->nvm.read = wm_nvm_read_eerd;
   2222 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2223 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2224 			wm_nvm_set_addrbits_size_eecd(sc);
   2225 		} else {
   2226 			sc->nvm.read = wm_nvm_read_invm;
   2227 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2228 			sc->sc_nvm_wordsize = INVM_SIZE;
   2229 		}
   2230 		sc->phy.acquire = wm_get_phy_82575;
   2231 		sc->phy.release = wm_put_phy_82575;
   2232 		sc->nvm.acquire = wm_get_nvm_80003;
   2233 		sc->nvm.release = wm_put_nvm_80003;
   2234 		break;
   2235 	default:
   2236 		break;
   2237 	}
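
         	/*
         	 * At this point sc->nvm.read and the phy/nvm acquire/release
         	 * hooks are fully resolved for the chip generation; every
         	 * later wm_nvm_read() goes through this indirection, with the
         	 * wm_get_null()/wm_put_null() defaults set earlier standing
         	 * in where no locking is needed.
         	 */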
   2238 
   2239 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2240 	switch (sc->sc_type) {
   2241 	case WM_T_82571:
   2242 	case WM_T_82572:
   2243 		reg = CSR_READ(sc, WMREG_SWSM2);
   2244 		if ((reg & SWSM2_LOCK) == 0) {
   2245 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2246 			force_clear_smbi = true;
   2247 		} else
   2248 			force_clear_smbi = false;
   2249 		break;
   2250 	case WM_T_82573:
   2251 	case WM_T_82574:
   2252 	case WM_T_82583:
   2253 		force_clear_smbi = true;
   2254 		break;
   2255 	default:
   2256 		force_clear_smbi = false;
   2257 		break;
   2258 	}
   2259 	if (force_clear_smbi) {
   2260 		reg = CSR_READ(sc, WMREG_SWSM);
   2261 		if ((reg & SWSM_SMBI) != 0)
   2262 			aprint_error_dev(sc->sc_dev,
   2263 			    "Please update the Bootagent\n");
   2264 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2265 	}
   2266 
   2267 	/*
    2268 	 * Defer printing the EEPROM type until after verifying the checksum.
   2269 	 * This allows the EEPROM type to be printed correctly in the case
   2270 	 * that no EEPROM is attached.
   2271 	 */
   2272 	/*
   2273 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2274 	 * this for later, so we can fail future reads from the EEPROM.
   2275 	 */
   2276 	if (wm_nvm_validate_checksum(sc)) {
   2277 		/*
   2278 		 * Read twice again because some PCI-e parts fail the
   2279 		 * first check due to the link being in sleep state.
   2280 		 */
   2281 		if (wm_nvm_validate_checksum(sc))
   2282 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2283 	}
   2284 
   2285 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2286 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2287 	else {
   2288 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2289 		    sc->sc_nvm_wordsize);
   2290 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2291 			aprint_verbose("iNVM");
   2292 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2293 			aprint_verbose("FLASH(HW)");
   2294 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2295 			aprint_verbose("FLASH");
   2296 		else {
   2297 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2298 				eetype = "SPI";
   2299 			else
   2300 				eetype = "MicroWire";
   2301 			aprint_verbose("(%d address bits) %s EEPROM",
   2302 			    sc->sc_nvm_addrbits, eetype);
   2303 		}
   2304 	}
   2305 	wm_nvm_version(sc);
   2306 	aprint_verbose("\n");
   2307 
   2308 	/*
   2309 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2310 	 * incorrect.
   2311 	 */
   2312 	wm_gmii_setup_phytype(sc, 0, 0);
   2313 
   2314 	/* Reset the chip to a known state. */
   2315 	wm_reset(sc);
   2316 
   2317 	/* Check for I21[01] PLL workaround */
   2318 	if (sc->sc_type == WM_T_I210)
   2319 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2320 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2321 		/* NVM image release 3.25 has a workaround */
   2322 		if ((sc->sc_nvm_ver_major < 3)
   2323 		    || ((sc->sc_nvm_ver_major == 3)
   2324 			&& (sc->sc_nvm_ver_minor < 25))) {
   2325 			aprint_verbose_dev(sc->sc_dev,
   2326 			    "ROM image version %d.%d is older than 3.25\n",
   2327 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2328 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2329 		}
   2330 	}
   2331 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2332 		wm_pll_workaround_i210(sc);
   2333 
   2334 	wm_get_wakeup(sc);
   2335 
   2336 	/* Non-AMT based hardware can now take control from firmware */
   2337 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2338 		wm_get_hw_control(sc);
   2339 
   2340 	/*
   2341 	 * Read the Ethernet address from the EEPROM, if not first found
   2342 	 * in device properties.
   2343 	 */
   2344 	ea = prop_dictionary_get(dict, "mac-address");
   2345 	if (ea != NULL) {
   2346 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2347 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2348 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2349 	} else {
   2350 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2351 			aprint_error_dev(sc->sc_dev,
   2352 			    "unable to read Ethernet address\n");
   2353 			goto out;
   2354 		}
   2355 	}
   2356 
   2357 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2358 	    ether_sprintf(enaddr));
   2359 
   2360 	/*
   2361 	 * Read the config info from the EEPROM, and set up various
   2362 	 * bits in the control registers based on their contents.
   2363 	 */
   2364 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2365 	if (pn != NULL) {
   2366 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2367 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2368 	} else {
   2369 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2370 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2371 			goto out;
   2372 		}
   2373 	}
   2374 
   2375 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2376 	if (pn != NULL) {
   2377 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2378 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2379 	} else {
   2380 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2381 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2382 			goto out;
   2383 		}
   2384 	}
   2385 
   2386 	/* check for WM_F_WOL */
   2387 	switch (sc->sc_type) {
   2388 	case WM_T_82542_2_0:
   2389 	case WM_T_82542_2_1:
   2390 	case WM_T_82543:
   2391 		/* dummy? */
   2392 		eeprom_data = 0;
   2393 		apme_mask = NVM_CFG3_APME;
   2394 		break;
   2395 	case WM_T_82544:
   2396 		apme_mask = NVM_CFG2_82544_APM_EN;
   2397 		eeprom_data = cfg2;
   2398 		break;
   2399 	case WM_T_82546:
   2400 	case WM_T_82546_3:
   2401 	case WM_T_82571:
   2402 	case WM_T_82572:
   2403 	case WM_T_82573:
   2404 	case WM_T_82574:
   2405 	case WM_T_82583:
   2406 	case WM_T_80003:
   2407 	default:
   2408 		apme_mask = NVM_CFG3_APME;
   2409 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2410 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2411 		break;
   2412 	case WM_T_82575:
   2413 	case WM_T_82576:
   2414 	case WM_T_82580:
   2415 	case WM_T_I350:
   2416 	case WM_T_I354: /* XXX ok? */
   2417 	case WM_T_ICH8:
   2418 	case WM_T_ICH9:
   2419 	case WM_T_ICH10:
   2420 	case WM_T_PCH:
   2421 	case WM_T_PCH2:
   2422 	case WM_T_PCH_LPT:
   2423 	case WM_T_PCH_SPT:
   2424 		/* XXX The funcid should be checked on some devices */
   2425 		apme_mask = WUC_APME;
   2426 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2427 		break;
   2428 	}
   2429 
   2430 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2431 	if ((eeprom_data & apme_mask) != 0)
   2432 		sc->sc_flags |= WM_F_WOL;
   2433 
   2434 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2435 		/* Check NVM for autonegotiation */
   2436 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2437 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2438 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2439 		}
   2440 	}
   2441 
   2442 	/*
   2443 	 * XXX need special handling for some multiple port cards
    2444 	 * to disable a particular port.
   2445 	 */
   2446 
   2447 	if (sc->sc_type >= WM_T_82544) {
   2448 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2449 		if (pn != NULL) {
   2450 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2451 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2452 		} else {
   2453 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2454 				aprint_error_dev(sc->sc_dev,
   2455 				    "unable to read SWDPIN\n");
   2456 				goto out;
   2457 			}
   2458 		}
   2459 	}
   2460 
   2461 	if (cfg1 & NVM_CFG1_ILOS)
   2462 		sc->sc_ctrl |= CTRL_ILOS;
   2463 
   2464 	/*
   2465 	 * XXX
    2466 	 * This code isn't correct because pins 2 and 3 are located
    2467 	 * in different positions on newer chips. Check all datasheets.
    2468 	 *
    2469 	 * Until this problem is resolved, only do this for chips <= 82580.
   2470 	 */
   2471 	if (sc->sc_type <= WM_T_82580) {
   2472 		if (sc->sc_type >= WM_T_82544) {
   2473 			sc->sc_ctrl |=
   2474 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2475 			    CTRL_SWDPIO_SHIFT;
   2476 			sc->sc_ctrl |=
   2477 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2478 			    CTRL_SWDPINS_SHIFT;
   2479 		} else {
   2480 			sc->sc_ctrl |=
   2481 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2482 			    CTRL_SWDPIO_SHIFT;
   2483 		}
   2484 	}
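
         	/*
         	 * The shifting above copies two 4-bit NVM fields into CTRL:
         	 * SWDPIO apparently selects the direction of the software-
         	 * controllable pins and SWDPIN their initial values, so e.g.
         	 * a SWDPIO field of 0x5 would configure pins 0 and 2 one way
         	 * and pins 1 and 3 the other.  The exact pin layout differs
         	 * on newer chips, hence the XXX note above.
         	 */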
   2485 
   2486 	/* XXX For other than 82580? */
   2487 	if (sc->sc_type == WM_T_82580) {
   2488 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2489 		if (nvmword & __BIT(13))
   2490 			sc->sc_ctrl |= CTRL_ILOS;
   2491 	}
   2492 
   2493 #if 0
   2494 	if (sc->sc_type >= WM_T_82544) {
   2495 		if (cfg1 & NVM_CFG1_IPS0)
   2496 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2497 		if (cfg1 & NVM_CFG1_IPS1)
   2498 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2499 		sc->sc_ctrl_ext |=
   2500 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2501 		    CTRL_EXT_SWDPIO_SHIFT;
   2502 		sc->sc_ctrl_ext |=
   2503 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2504 		    CTRL_EXT_SWDPINS_SHIFT;
   2505 	} else {
   2506 		sc->sc_ctrl_ext |=
   2507 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2508 		    CTRL_EXT_SWDPIO_SHIFT;
   2509 	}
   2510 #endif
   2511 
   2512 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2513 #if 0
   2514 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2515 #endif
   2516 
   2517 	if (sc->sc_type == WM_T_PCH) {
   2518 		uint16_t val;
   2519 
   2520 		/* Save the NVM K1 bit setting */
   2521 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2522 
   2523 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2524 			sc->sc_nvm_k1_enabled = 1;
   2525 		else
   2526 			sc->sc_nvm_k1_enabled = 0;
   2527 	}
   2528 
   2529 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2530 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2531 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2532 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2533 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2534 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2535 		/* Copper only */
   2536 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2537 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2538 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2539 	    || (sc->sc_type == WM_T_I211)) {
   2540 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2541 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2542 		switch (link_mode) {
   2543 		case CTRL_EXT_LINK_MODE_1000KX:
   2544 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2545 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2546 			break;
   2547 		case CTRL_EXT_LINK_MODE_SGMII:
   2548 			if (wm_sgmii_uses_mdio(sc)) {
   2549 				aprint_verbose_dev(sc->sc_dev,
   2550 				    "SGMII(MDIO)\n");
   2551 				sc->sc_flags |= WM_F_SGMII;
   2552 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2553 				break;
   2554 			}
   2555 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2556 			/*FALLTHROUGH*/
   2557 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2558 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2559 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2560 				if (link_mode
   2561 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2562 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2563 					sc->sc_flags |= WM_F_SGMII;
   2564 				} else {
   2565 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2566 					aprint_verbose_dev(sc->sc_dev,
   2567 					    "SERDES\n");
   2568 				}
   2569 				break;
   2570 			}
   2571 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2572 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2573 
   2574 			/* Change current link mode setting */
   2575 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2576 			switch (sc->sc_mediatype) {
   2577 			case WM_MEDIATYPE_COPPER:
   2578 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2579 				break;
   2580 			case WM_MEDIATYPE_SERDES:
   2581 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2582 				break;
   2583 			default:
   2584 				break;
   2585 			}
   2586 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2587 			break;
   2588 		case CTRL_EXT_LINK_MODE_GMII:
   2589 		default:
   2590 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2591 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2592 			break;
   2593 		}
   2594 
   2596 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2597 			reg |= CTRL_EXT_I2C_ENA;
   2598 		else
   2599 			reg &= ~CTRL_EXT_I2C_ENA;
   2600 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2601 	} else if (sc->sc_type < WM_T_82543 ||
   2602 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2603 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2604 			aprint_error_dev(sc->sc_dev,
   2605 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2606 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2607 		}
   2608 	} else {
   2609 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2610 			aprint_error_dev(sc->sc_dev,
   2611 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2612 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2613 		}
   2614 	}
   2615 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2616 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2617 
   2618 	/* Set device properties (macflags) */
   2619 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2620 
   2621 	/* Initialize the media structures accordingly. */
   2622 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2623 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2624 	else
   2625 		wm_tbi_mediainit(sc); /* All others */
   2626 
   2627 	ifp = &sc->sc_ethercom.ec_if;
   2628 	xname = device_xname(sc->sc_dev);
   2629 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2630 	ifp->if_softc = sc;
   2631 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2632 #ifdef WM_MPSAFE
   2633 	ifp->if_extflags = IFEF_START_MPSAFE;
   2634 #endif
   2635 	ifp->if_ioctl = wm_ioctl;
   2636 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2637 		ifp->if_start = wm_nq_start;
   2638 		/*
   2639 		 * When the number of CPUs is one and the controller can use
    2640 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2641 		 * That is, wm(4) uses two interrupts: one for Tx/Rx
    2642 		 * and the other for link status changes.
   2643 		 * In this situation, wm_nq_transmit() is disadvantageous
   2644 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2645 		 */
   2646 		if (wm_is_using_multiqueue(sc))
   2647 			ifp->if_transmit = wm_nq_transmit;
   2648 	} else {
   2649 		ifp->if_start = wm_start;
   2650 		/*
    2651 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2652 		 */
   2653 		if (wm_is_using_multiqueue(sc))
   2654 			ifp->if_transmit = wm_transmit;
   2655 	}
   2656 	ifp->if_watchdog = wm_watchdog;
   2657 	ifp->if_init = wm_init;
   2658 	ifp->if_stop = wm_stop;
   2659 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2660 	IFQ_SET_READY(&ifp->if_snd);
   2661 
   2662 	/* Check for jumbo frame */
   2663 	switch (sc->sc_type) {
   2664 	case WM_T_82573:
   2665 		/* XXX limited to 9234 if ASPM is disabled */
   2666 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2667 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2668 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2669 		break;
   2670 	case WM_T_82571:
   2671 	case WM_T_82572:
   2672 	case WM_T_82574:
   2673 	case WM_T_82575:
   2674 	case WM_T_82576:
   2675 	case WM_T_82580:
   2676 	case WM_T_I350:
    2677 	case WM_T_I354: /* XXX ok? */
   2678 	case WM_T_I210:
   2679 	case WM_T_I211:
   2680 	case WM_T_80003:
   2681 	case WM_T_ICH9:
   2682 	case WM_T_ICH10:
   2683 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2684 	case WM_T_PCH_LPT:
   2685 	case WM_T_PCH_SPT:
   2686 		/* XXX limited to 9234 */
   2687 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2688 		break;
   2689 	case WM_T_PCH:
   2690 		/* XXX limited to 4096 */
   2691 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2692 		break;
   2693 	case WM_T_82542_2_0:
   2694 	case WM_T_82542_2_1:
   2695 	case WM_T_82583:
   2696 	case WM_T_ICH8:
   2697 		/* No support for jumbo frame */
   2698 		break;
   2699 	default:
   2700 		/* ETHER_MAX_LEN_JUMBO */
   2701 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2702 		break;
   2703 	}
   2704 
   2705 	/* If we're a i82543 or greater, we can support VLANs. */
   2706 	if (sc->sc_type >= WM_T_82543)
   2707 		sc->sc_ethercom.ec_capabilities |=
   2708 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2709 
   2710 	/*
    2711 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2712 	 * on i82543 and later.
   2713 	 */
   2714 	if (sc->sc_type >= WM_T_82543) {
   2715 		ifp->if_capabilities |=
   2716 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2717 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2718 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2719 		    IFCAP_CSUM_TCPv6_Tx |
   2720 		    IFCAP_CSUM_UDPv6_Tx;
   2721 	}
   2722 
   2723 	/*
   2724 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2725 	 *
   2726 	 *	82541GI (8086:1076) ... no
   2727 	 *	82572EI (8086:10b9) ... yes
   2728 	 */
   2729 	if (sc->sc_type >= WM_T_82571) {
   2730 		ifp->if_capabilities |=
   2731 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2732 	}
   2733 
   2734 	/*
   2735 	 * If we're a i82544 or greater (except i82547), we can do
   2736 	 * TCP segmentation offload.
   2737 	 */
   2738 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2739 		ifp->if_capabilities |= IFCAP_TSOv4;
   2740 	}
   2741 
   2742 	if (sc->sc_type >= WM_T_82571) {
   2743 		ifp->if_capabilities |= IFCAP_TSOv6;
   2744 	}
   2745 
   2746 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2747 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2748 
   2749 #ifdef WM_MPSAFE
   2750 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2751 #else
   2752 	sc->sc_core_lock = NULL;
   2753 #endif
   2754 
   2755 	/* Attach the interface. */
   2756 	if_initialize(ifp);
   2757 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2758 	ether_ifattach(ifp, enaddr);
   2759 	if_register(ifp);
   2760 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2761 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2762 			  RND_FLAG_DEFAULT);
   2763 
   2764 #ifdef WM_EVENT_COUNTERS
   2765 	/* Attach event counters. */
   2766 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2767 	    NULL, xname, "linkintr");
   2768 
   2769 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2770 	    NULL, xname, "tx_xoff");
   2771 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2772 	    NULL, xname, "tx_xon");
   2773 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2774 	    NULL, xname, "rx_xoff");
   2775 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2776 	    NULL, xname, "rx_xon");
   2777 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2778 	    NULL, xname, "rx_macctl");
   2779 #endif /* WM_EVENT_COUNTERS */
   2780 
   2781 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2782 		pmf_class_network_register(self, ifp);
   2783 	else
   2784 		aprint_error_dev(self, "couldn't establish power handler\n");
   2785 
   2786 	sc->sc_flags |= WM_F_ATTACHED;
   2787  out:
   2788 	return;
   2789 }
   2790 
   2791 /* The detach function (ca_detach) */
   2792 static int
   2793 wm_detach(device_t self, int flags __unused)
   2794 {
   2795 	struct wm_softc *sc = device_private(self);
   2796 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2797 	int i;
   2798 
   2799 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2800 		return 0;
   2801 
    2802 	/* Stop the interface; wm_stop() also stops the callouts. */
   2803 	wm_stop(ifp, 1);
   2804 
   2805 	pmf_device_deregister(self);
   2806 
   2807 #ifdef WM_EVENT_COUNTERS
   2808 	evcnt_detach(&sc->sc_ev_linkintr);
   2809 
   2810 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2811 	evcnt_detach(&sc->sc_ev_tx_xon);
   2812 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2813 	evcnt_detach(&sc->sc_ev_rx_xon);
   2814 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2815 #endif /* WM_EVENT_COUNTERS */
   2816 
   2817 	/* Tell the firmware about the release */
   2818 	WM_CORE_LOCK(sc);
   2819 	wm_release_manageability(sc);
   2820 	wm_release_hw_control(sc);
   2821 	wm_enable_wakeup(sc);
   2822 	WM_CORE_UNLOCK(sc);
   2823 
   2824 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2825 
   2826 	/* Delete all remaining media. */
   2827 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2828 
   2829 	ether_ifdetach(ifp);
   2830 	if_detach(ifp);
   2831 	if_percpuq_destroy(sc->sc_ipq);
   2832 
   2833 	/* Unload RX dmamaps and free mbufs */
   2834 	for (i = 0; i < sc->sc_nqueues; i++) {
   2835 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2836 		mutex_enter(rxq->rxq_lock);
   2837 		wm_rxdrain(rxq);
   2838 		mutex_exit(rxq->rxq_lock);
   2839 	}
   2840 	/* Must unlock here */
   2841 
   2842 	/* Disestablish the interrupt handler */
   2843 	for (i = 0; i < sc->sc_nintrs; i++) {
   2844 		if (sc->sc_ihs[i] != NULL) {
   2845 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2846 			sc->sc_ihs[i] = NULL;
   2847 		}
   2848 	}
   2849 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2850 
   2851 	wm_free_txrx_queues(sc);
   2852 
   2853 	/* Unmap the registers */
   2854 	if (sc->sc_ss) {
   2855 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2856 		sc->sc_ss = 0;
   2857 	}
   2858 	if (sc->sc_ios) {
   2859 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2860 		sc->sc_ios = 0;
   2861 	}
   2862 	if (sc->sc_flashs) {
   2863 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2864 		sc->sc_flashs = 0;
   2865 	}
   2866 
   2867 	if (sc->sc_core_lock)
   2868 		mutex_obj_free(sc->sc_core_lock);
   2869 	if (sc->sc_ich_phymtx)
   2870 		mutex_obj_free(sc->sc_ich_phymtx);
   2871 	if (sc->sc_ich_nvmmtx)
   2872 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2873 
   2874 	return 0;
   2875 }
   2876 
   2877 static bool
   2878 wm_suspend(device_t self, const pmf_qual_t *qual)
   2879 {
   2880 	struct wm_softc *sc = device_private(self);
   2881 
   2882 	wm_release_manageability(sc);
   2883 	wm_release_hw_control(sc);
   2884 	wm_enable_wakeup(sc);
   2885 
   2886 	return true;
   2887 }
   2888 
   2889 static bool
   2890 wm_resume(device_t self, const pmf_qual_t *qual)
   2891 {
   2892 	struct wm_softc *sc = device_private(self);
   2893 
   2894 	wm_init_manageability(sc);
   2895 
   2896 	return true;
   2897 }
   2898 
   2899 /*
   2900  * wm_watchdog:		[ifnet interface function]
   2901  *
   2902  *	Watchdog timer handler.
   2903  */
   2904 static void
   2905 wm_watchdog(struct ifnet *ifp)
   2906 {
   2907 	int qid;
   2908 	struct wm_softc *sc = ifp->if_softc;
   2909 
   2910 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2911 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2912 
   2913 		wm_watchdog_txq(ifp, txq);
   2914 	}
   2915 
   2916 	/* Reset the interface. */
   2917 	(void) wm_init(ifp);
   2918 
   2919 	/*
    2920 	 * Some upper-layer processing, e.g. ALTQ or single-CPU systems,
    2921 	 * still calls ifp->if_start() directly.
   2922 	 */
   2923 	/* Try to get more packets going. */
   2924 	ifp->if_start(ifp);
   2925 }
   2926 
   2927 static void
   2928 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2929 {
   2930 	struct wm_softc *sc = ifp->if_softc;
   2931 
   2932 	/*
   2933 	 * Since we're using delayed interrupts, sweep up
   2934 	 * before we report an error.
   2935 	 */
   2936 	mutex_enter(txq->txq_lock);
   2937 	wm_txeof(sc, txq);
   2938 	mutex_exit(txq->txq_lock);
   2939 
   2940 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2941 #ifdef WM_DEBUG
   2942 		int i, j;
   2943 		struct wm_txsoft *txs;
   2944 #endif
   2945 		log(LOG_ERR,
   2946 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2947 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2948 		    txq->txq_next);
   2949 		ifp->if_oerrors++;
   2950 #ifdef WM_DEBUG
    2951 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2952 		    i = WM_NEXTTXS(txq, i)) {
    2953 			txs = &txq->txq_soft[i];
    2954 			printf("txs %d tx %d -> %d\n",
    2955 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2956 			for (j = txs->txs_firstdesc; ;
    2957 			    j = WM_NEXTTX(txq, j)) {
    2958 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2959 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2960 				printf("\t %#08x%08x\n",
    2961 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2962 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2963 				if (j == txs->txs_lastdesc)
    2964 					break;
    2965 			}
    2966 		}
   2967 #endif
   2968 	}
   2969 }
   2970 
   2971 /*
   2972  * wm_tick:
   2973  *
   2974  *	One second timer, used to check link status, sweep up
   2975  *	completed transmit jobs, etc.
   2976  */
   2977 static void
   2978 wm_tick(void *arg)
   2979 {
   2980 	struct wm_softc *sc = arg;
   2981 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2982 #ifndef WM_MPSAFE
   2983 	int s = splnet();
   2984 #endif
   2985 
   2986 	WM_CORE_LOCK(sc);
   2987 
   2988 	if (sc->sc_core_stopping)
   2989 		goto out;
   2990 
   2991 	if (sc->sc_type >= WM_T_82542_2_1) {
   2992 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2993 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2994 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2995 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2996 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2997 	}
   2998 
   2999 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3000 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3001 	    + CSR_READ(sc, WMREG_CRCERRS)
   3002 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3003 	    + CSR_READ(sc, WMREG_SYMERRC)
   3004 	    + CSR_READ(sc, WMREG_RXERRC)
   3005 	    + CSR_READ(sc, WMREG_SEC)
   3006 	    + CSR_READ(sc, WMREG_CEXTERR)
   3007 	    + CSR_READ(sc, WMREG_RLEC);
   3008 	/*
    3009 	 * WMREG_RNBC is incremented when there are no available buffers in
    3010 	 * host memory. It does not count dropped packets, because the
    3011 	 * ethernet controller can still receive packets in that case as long
    3012 	 * as there is space in the PHY's FIFO.
    3013 	 *
    3014 	 * If you want to track the WMREG_RNBC count, use a dedicated EVCNT
    3015 	 * instead of if_iqdrops.
   3016 	 */
   3017 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
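	/*
	 * A minimal sketch of counting RNBC separately, as suggested in
	 * the comment above.  sc_ev_rx_nobuf is a hypothetical softc
	 * member (with a matching evcnt_attach_dynamic() at attach time);
	 * it is not part of this driver.
	 */
#if 0
	WM_EVCNT_ADD(&sc->sc_ev_rx_nobuf, CSR_READ(sc, WMREG_RNBC));
#endif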
   3018 
   3019 	if (sc->sc_flags & WM_F_HAS_MII)
   3020 		mii_tick(&sc->sc_mii);
   3021 	else if ((sc->sc_type >= WM_T_82575)
   3022 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3023 		wm_serdes_tick(sc);
   3024 	else
   3025 		wm_tbi_tick(sc);
   3026 
   3027 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3028 out:
   3029 	WM_CORE_UNLOCK(sc);
   3030 #ifndef WM_MPSAFE
   3031 	splx(s);
   3032 #endif
   3033 }
   3034 
   3035 static int
   3036 wm_ifflags_cb(struct ethercom *ec)
   3037 {
   3038 	struct ifnet *ifp = &ec->ec_if;
   3039 	struct wm_softc *sc = ifp->if_softc;
   3040 	int rc = 0;
   3041 
   3042 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3043 		device_xname(sc->sc_dev), __func__));
   3044 
   3045 	WM_CORE_LOCK(sc);
   3046 
   3047 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3048 	sc->sc_if_flags = ifp->if_flags;
   3049 
   3050 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3051 		rc = ENETRESET;
   3052 		goto out;
   3053 	}
   3054 
   3055 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3056 		wm_set_filter(sc);
   3057 
   3058 	wm_set_vlan(sc);
   3059 
   3060 out:
   3061 	WM_CORE_UNLOCK(sc);
   3062 
   3063 	return rc;
   3064 }
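
/*
 * Note on the change mask in wm_ifflags_cb(): any flag change outside
 * IFF_CANTCHANGE and IFF_DEBUG returns ENETRESET so that the caller
 * fully reinitializes the interface; IFF_PROMISC and IFF_ALLMULTI
 * changes are handled in place by reprogramming the receive filter.
 */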
   3065 
   3066 /*
   3067  * wm_ioctl:		[ifnet interface function]
   3068  *
   3069  *	Handle control requests from the operator.
   3070  */
   3071 static int
   3072 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3073 {
   3074 	struct wm_softc *sc = ifp->if_softc;
   3075 	struct ifreq *ifr = (struct ifreq *) data;
   3076 	struct ifaddr *ifa = (struct ifaddr *)data;
   3077 	struct sockaddr_dl *sdl;
   3078 	int s, error;
   3079 
   3080 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3081 		device_xname(sc->sc_dev), __func__));
   3082 
   3083 #ifndef WM_MPSAFE
   3084 	s = splnet();
   3085 #endif
   3086 	switch (cmd) {
   3087 	case SIOCSIFMEDIA:
   3088 	case SIOCGIFMEDIA:
   3089 		WM_CORE_LOCK(sc);
   3090 		/* Flow control requires full-duplex mode. */
   3091 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3092 		    (ifr->ifr_media & IFM_FDX) == 0)
   3093 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3094 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3095 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3096 				/* We can do both TXPAUSE and RXPAUSE. */
   3097 				ifr->ifr_media |=
   3098 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3099 			}
   3100 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3101 		}
   3102 		WM_CORE_UNLOCK(sc);
   3103 #ifdef WM_MPSAFE
   3104 		s = splnet();
   3105 #endif
   3106 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3107 #ifdef WM_MPSAFE
   3108 		splx(s);
   3109 #endif
   3110 		break;
   3111 	case SIOCINITIFADDR:
   3112 		WM_CORE_LOCK(sc);
   3113 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3114 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3115 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3116 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
    3117 			/* The unicast address is the first receive filter entry */
   3118 			wm_set_filter(sc);
   3119 			error = 0;
   3120 			WM_CORE_UNLOCK(sc);
   3121 			break;
   3122 		}
   3123 		WM_CORE_UNLOCK(sc);
   3124 		/*FALLTHROUGH*/
   3125 	default:
   3126 #ifdef WM_MPSAFE
   3127 		s = splnet();
   3128 #endif
    3129 		/* ether_ioctl() may call wm_start(), so call it unlocked */
   3130 		error = ether_ioctl(ifp, cmd, data);
   3131 #ifdef WM_MPSAFE
   3132 		splx(s);
   3133 #endif
   3134 		if (error != ENETRESET)
   3135 			break;
   3136 
   3137 		error = 0;
   3138 
   3139 		if (cmd == SIOCSIFCAP) {
   3140 			error = (*ifp->if_init)(ifp);
   3141 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3142 			;
   3143 		else if (ifp->if_flags & IFF_RUNNING) {
   3144 			/*
   3145 			 * Multicast list has changed; set the hardware filter
   3146 			 * accordingly.
   3147 			 */
   3148 			WM_CORE_LOCK(sc);
   3149 			wm_set_filter(sc);
   3150 			WM_CORE_UNLOCK(sc);
   3151 		}
   3152 		break;
   3153 	}
   3154 
   3155 #ifndef WM_MPSAFE
   3156 	splx(s);
   3157 #endif
   3158 	return error;
   3159 }
   3160 
   3161 /* MAC address related */
   3162 
   3163 /*
    3164  * Get the offset of the MAC address and return it.
    3165  * If an error occurs, use offset 0.
   3166  */
   3167 static uint16_t
   3168 wm_check_alt_mac_addr(struct wm_softc *sc)
   3169 {
   3170 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3171 	uint16_t offset = NVM_OFF_MACADDR;
   3172 
   3173 	/* Try to read alternative MAC address pointer */
   3174 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3175 		return 0;
   3176 
    3177 	/* Check whether the pointer is valid. */
   3178 	if ((offset == 0x0000) || (offset == 0xffff))
   3179 		return 0;
   3180 
   3181 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3182 	/*
    3183 	 * Check whether the alternative MAC address is valid.
    3184 	 * Some cards have a non-0xffff pointer but don't actually use
    3185 	 * an alternative MAC address.
    3186 	 *
    3187 	 * A valid unicast address must have the broadcast bit clear.
   3188 	 */
   3189 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3190 		if (((myea[0] & 0xff) & 0x01) == 0)
   3191 			return offset; /* Found */
   3192 
   3193 	/* Not found */
   3194 	return 0;
   3195 }
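
/*
 * Worked example of the validity check above: if the word read at the
 * computed offset is 0xff01, the low byte 0x01 is the first octet of
 * the address and has the multicast/broadcast bit set, so the
 * alternative MAC address is rejected and the default offset is used.
 */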
   3196 
   3197 static int
   3198 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3199 {
   3200 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3201 	uint16_t offset = NVM_OFF_MACADDR;
   3202 	int do_invert = 0;
   3203 
   3204 	switch (sc->sc_type) {
   3205 	case WM_T_82580:
   3206 	case WM_T_I350:
   3207 	case WM_T_I354:
   3208 		/* EEPROM Top Level Partitioning */
   3209 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3210 		break;
   3211 	case WM_T_82571:
   3212 	case WM_T_82575:
   3213 	case WM_T_82576:
   3214 	case WM_T_80003:
   3215 	case WM_T_I210:
   3216 	case WM_T_I211:
   3217 		offset = wm_check_alt_mac_addr(sc);
   3218 		if (offset == 0)
   3219 			if ((sc->sc_funcid & 0x01) == 1)
   3220 				do_invert = 1;
   3221 		break;
   3222 	default:
   3223 		if ((sc->sc_funcid & 0x01) == 1)
   3224 			do_invert = 1;
   3225 		break;
   3226 	}
   3227 
   3228 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3229 		goto bad;
   3230 
   3231 	enaddr[0] = myea[0] & 0xff;
   3232 	enaddr[1] = myea[0] >> 8;
   3233 	enaddr[2] = myea[1] & 0xff;
   3234 	enaddr[3] = myea[1] >> 8;
   3235 	enaddr[4] = myea[2] & 0xff;
   3236 	enaddr[5] = myea[2] >> 8;
   3237 
   3238 	/*
   3239 	 * Toggle the LSB of the MAC address on the second port
   3240 	 * of some dual port cards.
   3241 	 */
   3242 	if (do_invert != 0)
   3243 		enaddr[5] ^= 1;
   3244 
   3245 	return 0;
   3246 
   3247  bad:
   3248 	return -1;
   3249 }
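
/*
 * Example of the word-to-byte assembly above: NVM words
 * { 0x0c00, 0x12de, 0x5634 } yield the station address
 * 00:0c:de:12:34:56; each 16-bit word stores its low byte first.
 */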
   3250 
   3251 /*
   3252  * wm_set_ral:
   3253  *
    3254  *	Set an entry in the receive address list.
   3255  */
   3256 static void
   3257 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3258 {
   3259 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3260 	uint32_t wlock_mac;
   3261 	int rv;
   3262 
   3263 	if (enaddr != NULL) {
   3264 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3265 		    (enaddr[3] << 24);
   3266 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3267 		ral_hi |= RAL_AV;
   3268 	} else {
   3269 		ral_lo = 0;
   3270 		ral_hi = 0;
   3271 	}
   3272 
   3273 	switch (sc->sc_type) {
   3274 	case WM_T_82542_2_0:
   3275 	case WM_T_82542_2_1:
   3276 	case WM_T_82543:
   3277 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3278 		CSR_WRITE_FLUSH(sc);
   3279 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3280 		CSR_WRITE_FLUSH(sc);
   3281 		break;
   3282 	case WM_T_PCH2:
   3283 	case WM_T_PCH_LPT:
   3284 	case WM_T_PCH_SPT:
   3285 		if (idx == 0) {
   3286 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3287 			CSR_WRITE_FLUSH(sc);
   3288 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3289 			CSR_WRITE_FLUSH(sc);
   3290 			return;
   3291 		}
   3292 		if (sc->sc_type != WM_T_PCH2) {
   3293 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3294 			    FWSM_WLOCK_MAC);
   3295 			addrl = WMREG_SHRAL(idx - 1);
   3296 			addrh = WMREG_SHRAH(idx - 1);
   3297 		} else {
   3298 			wlock_mac = 0;
   3299 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3300 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3301 		}
   3302 
   3303 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3304 			rv = wm_get_swflag_ich8lan(sc);
   3305 			if (rv != 0)
   3306 				return;
   3307 			CSR_WRITE(sc, addrl, ral_lo);
   3308 			CSR_WRITE_FLUSH(sc);
   3309 			CSR_WRITE(sc, addrh, ral_hi);
   3310 			CSR_WRITE_FLUSH(sc);
   3311 			wm_put_swflag_ich8lan(sc);
   3312 		}
   3313 
   3314 		break;
   3315 	default:
   3316 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3317 		CSR_WRITE_FLUSH(sc);
   3318 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3319 		CSR_WRITE_FLUSH(sc);
   3320 		break;
   3321 	}
   3322 }
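
/*
 * Packing example for wm_set_ral(): the station address
 * 00:0c:de:12:34:56 is written as ral_lo == 0x12de0c00 and
 * ral_hi == (0x5634 | RAL_AV).
 */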
   3323 
   3324 /*
   3325  * wm_mchash:
   3326  *
   3327  *	Compute the hash of the multicast address for the 4096-bit
   3328  *	multicast filter.
   3329  */
   3330 static uint32_t
   3331 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3332 {
   3333 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3334 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3335 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3336 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3337 	uint32_t hash;
   3338 
   3339 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3340 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3341 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3342 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3343 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3344 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3345 		return (hash & 0x3ff);
   3346 	}
   3347 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3348 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3349 
   3350 	return (hash & 0xfff);
   3351 }
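
/*
 * Worked example (non-ICH variant, sc_mchash_type == 0): for the
 * multicast address 01:00:5e:00:00:01, enaddr[4] == 0x00 and
 * enaddr[5] == 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
 * wm_set_filter() below then sets bit (hash & 0x1f) == 16 in MTA
 * register word (hash >> 5) == 0.
 */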
   3352 
   3353 /*
   3354  * wm_set_filter:
   3355  *
   3356  *	Set up the receive filter.
   3357  */
   3358 static void
   3359 wm_set_filter(struct wm_softc *sc)
   3360 {
   3361 	struct ethercom *ec = &sc->sc_ethercom;
   3362 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3363 	struct ether_multi *enm;
   3364 	struct ether_multistep step;
   3365 	bus_addr_t mta_reg;
   3366 	uint32_t hash, reg, bit;
   3367 	int i, size, ralmax;
   3368 
   3369 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3370 		device_xname(sc->sc_dev), __func__));
   3371 
   3372 	if (sc->sc_type >= WM_T_82544)
   3373 		mta_reg = WMREG_CORDOVA_MTA;
   3374 	else
   3375 		mta_reg = WMREG_MTA;
   3376 
   3377 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3378 
   3379 	if (ifp->if_flags & IFF_BROADCAST)
   3380 		sc->sc_rctl |= RCTL_BAM;
   3381 	if (ifp->if_flags & IFF_PROMISC) {
   3382 		sc->sc_rctl |= RCTL_UPE;
   3383 		goto allmulti;
   3384 	}
   3385 
   3386 	/*
   3387 	 * Set the station address in the first RAL slot, and
   3388 	 * clear the remaining slots.
   3389 	 */
   3390 	if (sc->sc_type == WM_T_ICH8)
    3391 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3392 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3393 	    || (sc->sc_type == WM_T_PCH))
   3394 		size = WM_RAL_TABSIZE_ICH8;
   3395 	else if (sc->sc_type == WM_T_PCH2)
   3396 		size = WM_RAL_TABSIZE_PCH2;
    3397 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3398 		size = WM_RAL_TABSIZE_PCH_LPT;
   3399 	else if (sc->sc_type == WM_T_82575)
   3400 		size = WM_RAL_TABSIZE_82575;
   3401 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3402 		size = WM_RAL_TABSIZE_82576;
   3403 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3404 		size = WM_RAL_TABSIZE_I350;
   3405 	else
   3406 		size = WM_RAL_TABSIZE;
   3407 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3408 
   3409 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3410 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3411 		switch (i) {
   3412 		case 0:
   3413 			/* We can use all entries */
   3414 			ralmax = size;
   3415 			break;
   3416 		case 1:
   3417 			/* Only RAR[0] */
   3418 			ralmax = 1;
   3419 			break;
   3420 		default:
   3421 			/* available SHRA + RAR[0] */
   3422 			ralmax = i + 1;
   3423 		}
   3424 	} else
   3425 		ralmax = size;
   3426 	for (i = 1; i < size; i++) {
   3427 		if (i < ralmax)
   3428 			wm_set_ral(sc, NULL, i);
   3429 	}
   3430 
   3431 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3432 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3433 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3434 	    || (sc->sc_type == WM_T_PCH_SPT))
   3435 		size = WM_ICH8_MC_TABSIZE;
   3436 	else
   3437 		size = WM_MC_TABSIZE;
   3438 	/* Clear out the multicast table. */
   3439 	for (i = 0; i < size; i++) {
   3440 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3441 		CSR_WRITE_FLUSH(sc);
   3442 	}
   3443 
   3444 	ETHER_LOCK(ec);
   3445 	ETHER_FIRST_MULTI(step, ec, enm);
   3446 	while (enm != NULL) {
   3447 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3448 			ETHER_UNLOCK(ec);
   3449 			/*
   3450 			 * We must listen to a range of multicast addresses.
   3451 			 * For now, just accept all multicasts, rather than
   3452 			 * trying to set only those filter bits needed to match
   3453 			 * the range.  (At this time, the only use of address
   3454 			 * ranges is for IP multicast routing, for which the
   3455 			 * range is big enough to require all bits set.)
   3456 			 */
   3457 			goto allmulti;
   3458 		}
   3459 
   3460 		hash = wm_mchash(sc, enm->enm_addrlo);
   3461 
   3462 		reg = (hash >> 5);
   3463 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3464 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3465 		    || (sc->sc_type == WM_T_PCH2)
   3466 		    || (sc->sc_type == WM_T_PCH_LPT)
   3467 		    || (sc->sc_type == WM_T_PCH_SPT))
   3468 			reg &= 0x1f;
   3469 		else
   3470 			reg &= 0x7f;
   3471 		bit = hash & 0x1f;
   3472 
   3473 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3474 		hash |= 1U << bit;
   3475 
   3476 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3477 			/*
   3478 			 * 82544 Errata 9: Certain register cannot be written
   3479 			 * with particular alignments in PCI-X bus operation
   3480 			 * (FCAH, MTA and VFTA).
   3481 			 */
   3482 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3483 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3484 			CSR_WRITE_FLUSH(sc);
   3485 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3486 			CSR_WRITE_FLUSH(sc);
   3487 		} else {
   3488 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3489 			CSR_WRITE_FLUSH(sc);
   3490 		}
   3491 
   3492 		ETHER_NEXT_MULTI(step, enm);
   3493 	}
   3494 	ETHER_UNLOCK(ec);
   3495 
   3496 	ifp->if_flags &= ~IFF_ALLMULTI;
   3497 	goto setit;
   3498 
   3499  allmulti:
   3500 	ifp->if_flags |= IFF_ALLMULTI;
   3501 	sc->sc_rctl |= RCTL_MPE;
   3502 
   3503  setit:
   3504 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3505 }
   3506 
   3507 /* Reset and init related */
   3508 
   3509 static void
   3510 wm_set_vlan(struct wm_softc *sc)
   3511 {
   3512 
   3513 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3514 		device_xname(sc->sc_dev), __func__));
   3515 
   3516 	/* Deal with VLAN enables. */
   3517 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3518 		sc->sc_ctrl |= CTRL_VME;
   3519 	else
   3520 		sc->sc_ctrl &= ~CTRL_VME;
   3521 
   3522 	/* Write the control registers. */
   3523 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3524 }
   3525 
   3526 static void
   3527 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3528 {
   3529 	uint32_t gcr;
   3530 	pcireg_t ctrl2;
   3531 
   3532 	gcr = CSR_READ(sc, WMREG_GCR);
   3533 
    3534 	/* Only take action if the timeout value is still the default of 0 */
   3535 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3536 		goto out;
   3537 
   3538 	if ((gcr & GCR_CAP_VER2) == 0) {
   3539 		gcr |= GCR_CMPL_TMOUT_10MS;
   3540 		goto out;
   3541 	}
   3542 
   3543 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3544 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3545 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3546 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3547 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3548 
   3549 out:
   3550 	/* Disable completion timeout resend */
   3551 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3552 
   3553 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3554 }
   3555 
   3556 void
   3557 wm_get_auto_rd_done(struct wm_softc *sc)
   3558 {
   3559 	int i;
   3560 
   3561 	/* wait for eeprom to reload */
   3562 	switch (sc->sc_type) {
   3563 	case WM_T_82571:
   3564 	case WM_T_82572:
   3565 	case WM_T_82573:
   3566 	case WM_T_82574:
   3567 	case WM_T_82583:
   3568 	case WM_T_82575:
   3569 	case WM_T_82576:
   3570 	case WM_T_82580:
   3571 	case WM_T_I350:
   3572 	case WM_T_I354:
   3573 	case WM_T_I210:
   3574 	case WM_T_I211:
   3575 	case WM_T_80003:
   3576 	case WM_T_ICH8:
   3577 	case WM_T_ICH9:
   3578 		for (i = 0; i < 10; i++) {
   3579 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3580 				break;
   3581 			delay(1000);
   3582 		}
   3583 		if (i == 10) {
   3584 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3585 			    "complete\n", device_xname(sc->sc_dev));
   3586 		}
   3587 		break;
   3588 	default:
   3589 		break;
   3590 	}
   3591 }
   3592 
   3593 void
   3594 wm_lan_init_done(struct wm_softc *sc)
   3595 {
   3596 	uint32_t reg = 0;
   3597 	int i;
   3598 
   3599 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3600 		device_xname(sc->sc_dev), __func__));
   3601 
   3602 	/* Wait for eeprom to reload */
   3603 	switch (sc->sc_type) {
   3604 	case WM_T_ICH10:
   3605 	case WM_T_PCH:
   3606 	case WM_T_PCH2:
   3607 	case WM_T_PCH_LPT:
   3608 	case WM_T_PCH_SPT:
   3609 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3610 			reg = CSR_READ(sc, WMREG_STATUS);
   3611 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3612 				break;
   3613 			delay(100);
   3614 		}
   3615 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3616 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3617 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3618 		}
   3619 		break;
   3620 	default:
   3621 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3622 		    __func__);
   3623 		break;
   3624 	}
   3625 
   3626 	reg &= ~STATUS_LAN_INIT_DONE;
   3627 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3628 }
   3629 
   3630 void
   3631 wm_get_cfg_done(struct wm_softc *sc)
   3632 {
   3633 	int mask;
   3634 	uint32_t reg;
   3635 	int i;
   3636 
   3637 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3638 		device_xname(sc->sc_dev), __func__));
   3639 
   3640 	/* Wait for eeprom to reload */
   3641 	switch (sc->sc_type) {
   3642 	case WM_T_82542_2_0:
   3643 	case WM_T_82542_2_1:
   3644 		/* null */
   3645 		break;
   3646 	case WM_T_82543:
   3647 	case WM_T_82544:
   3648 	case WM_T_82540:
   3649 	case WM_T_82545:
   3650 	case WM_T_82545_3:
   3651 	case WM_T_82546:
   3652 	case WM_T_82546_3:
   3653 	case WM_T_82541:
   3654 	case WM_T_82541_2:
   3655 	case WM_T_82547:
   3656 	case WM_T_82547_2:
   3657 	case WM_T_82573:
   3658 	case WM_T_82574:
   3659 	case WM_T_82583:
   3660 		/* generic */
   3661 		delay(10*1000);
   3662 		break;
   3663 	case WM_T_80003:
   3664 	case WM_T_82571:
   3665 	case WM_T_82572:
   3666 	case WM_T_82575:
   3667 	case WM_T_82576:
   3668 	case WM_T_82580:
   3669 	case WM_T_I350:
   3670 	case WM_T_I354:
   3671 	case WM_T_I210:
   3672 	case WM_T_I211:
   3673 		if (sc->sc_type == WM_T_82571) {
   3674 			/* Only 82571 shares port 0 */
   3675 			mask = EEMNGCTL_CFGDONE_0;
   3676 		} else
   3677 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3678 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3679 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3680 				break;
   3681 			delay(1000);
   3682 		}
   3683 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3684 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3685 				device_xname(sc->sc_dev), __func__));
   3686 		}
   3687 		break;
   3688 	case WM_T_ICH8:
   3689 	case WM_T_ICH9:
   3690 	case WM_T_ICH10:
   3691 	case WM_T_PCH:
   3692 	case WM_T_PCH2:
   3693 	case WM_T_PCH_LPT:
   3694 	case WM_T_PCH_SPT:
   3695 		delay(10*1000);
   3696 		if (sc->sc_type >= WM_T_ICH10)
   3697 			wm_lan_init_done(sc);
   3698 		else
   3699 			wm_get_auto_rd_done(sc);
   3700 
   3701 		reg = CSR_READ(sc, WMREG_STATUS);
   3702 		if ((reg & STATUS_PHYRA) != 0)
   3703 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3704 		break;
   3705 	default:
   3706 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3707 		    __func__);
   3708 		break;
   3709 	}
   3710 }
   3711 
   3712 void
   3713 wm_phy_post_reset(struct wm_softc *sc)
   3714 {
   3715 	uint32_t reg;
   3716 
   3717 	/* This function is only for ICH8 and newer. */
   3718 	if (sc->sc_type < WM_T_ICH8)
   3719 		return;
   3720 
   3721 	if (wm_phy_resetisblocked(sc)) {
   3722 		/* XXX */
   3723 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3724 		return;
   3725 	}
   3726 
   3727 	/* Allow time for h/w to get to quiescent state after reset */
   3728 	delay(10*1000);
   3729 
   3730 	/* Perform any necessary post-reset workarounds */
   3731 	if (sc->sc_type == WM_T_PCH)
   3732 		wm_hv_phy_workaround_ich8lan(sc);
   3733 	if (sc->sc_type == WM_T_PCH2)
   3734 		wm_lv_phy_workaround_ich8lan(sc);
   3735 
   3736 	/* Clear the host wakeup bit after lcd reset */
   3737 	if (sc->sc_type >= WM_T_PCH) {
   3738 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3739 		    BM_PORT_GEN_CFG);
   3740 		reg &= ~BM_WUC_HOST_WU_BIT;
   3741 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3742 		    BM_PORT_GEN_CFG, reg);
   3743 	}
   3744 
   3745 	/* Configure the LCD with the extended configuration region in NVM */
   3746 	wm_init_lcd_from_nvm(sc);
   3747 
   3748 	/* Configure the LCD with the OEM bits in NVM */
   3749 }
   3750 
   3751 /* Only for PCH and newer */
   3752 static void
   3753 wm_write_smbus_addr(struct wm_softc *sc)
   3754 {
   3755 	uint32_t strap, freq;
   3756 	uint32_t phy_data;
   3757 
   3758 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3759 		device_xname(sc->sc_dev), __func__));
   3760 
   3761 	strap = CSR_READ(sc, WMREG_STRAP);
   3762 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3763 
   3764 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3765 
   3766 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3767 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3768 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3769 
   3770 	if (sc->sc_phytype == WMPHY_I217) {
   3771 		/* Restore SMBus frequency */
    3772 		if (freq--) {
   3773 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3774 			    | HV_SMB_ADDR_FREQ_HIGH);
   3775 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3776 			    HV_SMB_ADDR_FREQ_LOW);
   3777 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3778 			    HV_SMB_ADDR_FREQ_HIGH);
   3779 		} else {
   3780 			DPRINTF(WM_DEBUG_INIT,
    3781 			    ("%s: %s: Unsupported SMB frequency in PHY\n",
   3782 				device_xname(sc->sc_dev), __func__));
   3783 		}
   3784 	}
   3785 
   3786 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3787 }
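
/*
 * Example of the frequency decode above: a STRAP_FREQ field of 2
 * becomes 1 after the post-decrement, so HV_SMB_ADDR_FREQ_LOW is set
 * and HV_SMB_ADDR_FREQ_HIGH stays clear; a field of 0 skips the
 * update and hits the unsupported-frequency DPRINTF instead.
 */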
   3788 
   3789 void
   3790 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3791 {
   3792 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3793 	uint16_t phy_page = 0;
   3794 
   3795 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3796 		device_xname(sc->sc_dev), __func__));
   3797 
   3798 	switch (sc->sc_type) {
   3799 	case WM_T_ICH8:
   3800 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3801 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3802 			return;
   3803 
   3804 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3805 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3806 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3807 			break;
   3808 		}
   3809 		/* FALLTHROUGH */
   3810 	case WM_T_PCH:
   3811 	case WM_T_PCH2:
   3812 	case WM_T_PCH_LPT:
   3813 	case WM_T_PCH_SPT:
   3814 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3815 		break;
   3816 	default:
   3817 		return;
   3818 	}
   3819 
   3820 	sc->phy.acquire(sc);
   3821 
   3822 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3823 	if ((reg & sw_cfg_mask) == 0)
   3824 		goto release;
   3825 
   3826 	/*
   3827 	 * Make sure HW does not configure LCD from PHY extended configuration
   3828 	 * before SW configuration
   3829 	 */
   3830 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3831 	if ((sc->sc_type < WM_T_PCH2)
   3832 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3833 		goto release;
   3834 
   3835 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3836 		device_xname(sc->sc_dev), __func__));
    3837 	/* The NVM pointer is in DWORDs; convert it to a word address */
   3838 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3839 
   3840 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3841 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3842 
   3843 	if (((sc->sc_type == WM_T_PCH)
   3844 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3845 	    || (sc->sc_type > WM_T_PCH)) {
   3846 		/*
   3847 		 * HW configures the SMBus address and LEDs when the OEM and
   3848 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3849 		 * are cleared, SW will configure them instead.
   3850 		 */
   3851 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3852 			device_xname(sc->sc_dev), __func__));
   3853 		wm_write_smbus_addr(sc);
   3854 
   3855 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3856 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3857 	}
   3858 
   3859 	/* Configure LCD from extended configuration region. */
   3860 	for (i = 0; i < cnf_size; i++) {
   3861 		uint16_t reg_data, reg_addr;
   3862 
   3863 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3864 			goto release;
   3865 
   3866 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3867 			goto release;
   3868 
   3869 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3870 			phy_page = reg_data;
   3871 
   3872 		reg_addr &= IGPHY_MAXREGADDR;
   3873 		reg_addr |= phy_page;
   3874 
   3875 		sc->phy.release(sc); /* XXX */
   3876 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3877 		sc->phy.acquire(sc); /* XXX */
   3878 	}
   3879 
   3880 release:
   3881 	sc->phy.release(sc);
   3882 	return;
   3883 }
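
/*
 * Note on the configuration loop above: the extended configuration
 * region is an array of records, each a (reg_data, reg_addr) pair of
 * 16-bit NVM words; a record whose address is MII_IGPHY_PAGE_SELECT
 * switches the PHY page used for the writes that follow it.
 */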
   3884 
   3885 
   3886 /* Init hardware bits */
   3887 void
   3888 wm_initialize_hardware_bits(struct wm_softc *sc)
   3889 {
   3890 	uint32_t tarc0, tarc1, reg;
   3891 
   3892 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3893 		device_xname(sc->sc_dev), __func__));
   3894 
   3895 	/* For 82571 variant, 80003 and ICHs */
   3896 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3897 	    || (sc->sc_type >= WM_T_80003)) {
   3898 
   3899 		/* Transmit Descriptor Control 0 */
   3900 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3901 		reg |= TXDCTL_COUNT_DESC;
   3902 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3903 
   3904 		/* Transmit Descriptor Control 1 */
   3905 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3906 		reg |= TXDCTL_COUNT_DESC;
   3907 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3908 
   3909 		/* TARC0 */
   3910 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3911 		switch (sc->sc_type) {
   3912 		case WM_T_82571:
   3913 		case WM_T_82572:
   3914 		case WM_T_82573:
   3915 		case WM_T_82574:
   3916 		case WM_T_82583:
   3917 		case WM_T_80003:
   3918 			/* Clear bits 30..27 */
   3919 			tarc0 &= ~__BITS(30, 27);
   3920 			break;
   3921 		default:
   3922 			break;
   3923 		}
   3924 
   3925 		switch (sc->sc_type) {
   3926 		case WM_T_82571:
   3927 		case WM_T_82572:
   3928 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3929 
   3930 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3931 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3932 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3933 			/* 8257[12] Errata No.7 */
    3934 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3935 
   3936 			/* TARC1 bit 28 */
   3937 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3938 				tarc1 &= ~__BIT(28);
   3939 			else
   3940 				tarc1 |= __BIT(28);
   3941 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3942 
   3943 			/*
   3944 			 * 8257[12] Errata No.13
    3945 			 * Disable Dynamic Clock Gating.
   3946 			 */
   3947 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3948 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3949 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3950 			break;
   3951 		case WM_T_82573:
   3952 		case WM_T_82574:
   3953 		case WM_T_82583:
   3954 			if ((sc->sc_type == WM_T_82574)
   3955 			    || (sc->sc_type == WM_T_82583))
   3956 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3957 
   3958 			/* Extended Device Control */
   3959 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3960 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3961 			reg |= __BIT(22);	/* Set bit 22 */
   3962 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3963 
   3964 			/* Device Control */
   3965 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3966 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3967 
   3968 			/* PCIe Control Register */
   3969 			/*
   3970 			 * 82573 Errata (unknown).
   3971 			 *
   3972 			 * 82574 Errata 25 and 82583 Errata 12
   3973 			 * "Dropped Rx Packets":
    3974 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3975 			 */
   3976 			reg = CSR_READ(sc, WMREG_GCR);
   3977 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3978 			CSR_WRITE(sc, WMREG_GCR, reg);
   3979 
   3980 			if ((sc->sc_type == WM_T_82574)
   3981 			    || (sc->sc_type == WM_T_82583)) {
   3982 				/*
   3983 				 * Document says this bit must be set for
   3984 				 * proper operation.
   3985 				 */
   3986 				reg = CSR_READ(sc, WMREG_GCR);
   3987 				reg |= __BIT(22);
   3988 				CSR_WRITE(sc, WMREG_GCR, reg);
   3989 
   3990 				/*
    3991 				 * Apply a workaround for the hardware errata
    3992 				 * documented in the errata docs. It fixes an
    3993 				 * issue where some error-prone or unreliable
    3994 				 * PCIe completions occur, particularly with
    3995 				 * ASPM enabled. Without the fix, the issue
    3996 				 * can cause Tx timeouts.
   3997 				 */
   3998 				reg = CSR_READ(sc, WMREG_GCR2);
   3999 				reg |= __BIT(0);
   4000 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4001 			}
   4002 			break;
   4003 		case WM_T_80003:
   4004 			/* TARC0 */
   4005 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4006 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4007 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4008 
   4009 			/* TARC1 bit 28 */
   4010 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4011 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4012 				tarc1 &= ~__BIT(28);
   4013 			else
   4014 				tarc1 |= __BIT(28);
   4015 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4016 			break;
   4017 		case WM_T_ICH8:
   4018 		case WM_T_ICH9:
   4019 		case WM_T_ICH10:
   4020 		case WM_T_PCH:
   4021 		case WM_T_PCH2:
   4022 		case WM_T_PCH_LPT:
   4023 		case WM_T_PCH_SPT:
   4024 			/* TARC0 */
   4025 			if ((sc->sc_type == WM_T_ICH8)
   4026 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   4027 				/* Set TARC0 bits 29 and 28 */
   4028 				tarc0 |= __BITS(29, 28);
   4029 			}
   4030 			/* Set TARC0 bits 23,24,26,27 */
   4031 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4032 
   4033 			/* CTRL_EXT */
   4034 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4035 			reg |= __BIT(22);	/* Set bit 22 */
   4036 			/*
   4037 			 * Enable PHY low-power state when MAC is at D3
   4038 			 * w/o WoL
   4039 			 */
   4040 			if (sc->sc_type >= WM_T_PCH)
   4041 				reg |= CTRL_EXT_PHYPDEN;
   4042 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4043 
   4044 			/* TARC1 */
   4045 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4046 			/* bit 28 */
   4047 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4048 				tarc1 &= ~__BIT(28);
   4049 			else
   4050 				tarc1 |= __BIT(28);
   4051 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4052 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4053 
   4054 			/* Device Status */
   4055 			if (sc->sc_type == WM_T_ICH8) {
   4056 				reg = CSR_READ(sc, WMREG_STATUS);
   4057 				reg &= ~__BIT(31);
   4058 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4059 
   4060 			}
   4061 
   4062 			/* IOSFPC */
   4063 			if (sc->sc_type == WM_T_PCH_SPT) {
   4064 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4065 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4066 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4067 			}
   4068 			/*
    4069 			 * To work around a descriptor data corruption issue
    4070 			 * seen with NFSv2 UDP traffic, simply disable the
    4071 			 * NFS filtering capability.
   4072 			 */
   4073 			reg = CSR_READ(sc, WMREG_RFCTL);
   4074 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4075 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4076 			break;
   4077 		default:
   4078 			break;
   4079 		}
   4080 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4081 
   4082 		switch (sc->sc_type) {
   4083 		/*
   4084 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4085 		 * Avoid RSS Hash Value bug.
    4086 		 * Avoid the RSS Hash Value bug.
   4087 		case WM_T_82571:
   4088 		case WM_T_82572:
   4089 		case WM_T_82573:
   4090 		case WM_T_80003:
   4091 		case WM_T_ICH8:
   4092 			reg = CSR_READ(sc, WMREG_RFCTL);
    4093 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4094 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4095 			break;
   4096 		case WM_T_82574:
    4097 			/* Use extended Rx descriptors. */
   4098 			reg = CSR_READ(sc, WMREG_RFCTL);
   4099 			reg |= WMREG_RFCTL_EXSTEN;
   4100 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4101 			break;
   4102 		default:
   4103 			break;
   4104 		}
   4105 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4106 		/*
   4107 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4108 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4109 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4110 		 * Correctly by the Device"
   4111 		 *
   4112 		 * I354(C2000) Errata AVR53:
   4113 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4114 		 * Hang"
   4115 		 */
   4116 		reg = CSR_READ(sc, WMREG_RFCTL);
   4117 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4118 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4119 	}
   4120 }
   4121 
   4122 static uint32_t
   4123 wm_rxpbs_adjust_82580(uint32_t val)
   4124 {
   4125 	uint32_t rv = 0;
   4126 
   4127 	if (val < __arraycount(wm_82580_rxpbs_table))
   4128 		rv = wm_82580_rxpbs_table[val];
   4129 
   4130 	return rv;
   4131 }
   4132 
   4133 /*
   4134  * wm_reset_phy:
   4135  *
   4136  *	generic PHY reset function.
   4137  *	Same as e1000_phy_hw_reset_generic()
   4138  */
   4139 static void
   4140 wm_reset_phy(struct wm_softc *sc)
   4141 {
   4142 	uint32_t reg;
   4143 
   4144 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4145 		device_xname(sc->sc_dev), __func__));
   4146 	if (wm_phy_resetisblocked(sc))
   4147 		return;
   4148 
   4149 	sc->phy.acquire(sc);
   4150 
   4151 	reg = CSR_READ(sc, WMREG_CTRL);
   4152 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4153 	CSR_WRITE_FLUSH(sc);
   4154 
   4155 	delay(sc->phy.reset_delay_us);
   4156 
   4157 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4158 	CSR_WRITE_FLUSH(sc);
   4159 
   4160 	delay(150);
   4161 
   4162 	sc->phy.release(sc);
   4163 
   4164 	wm_get_cfg_done(sc);
   4165 	wm_phy_post_reset(sc);
   4166 }
   4167 
   4168 static void
   4169 wm_flush_desc_rings(struct wm_softc *sc)
   4170 {
   4171 	pcireg_t preg;
   4172 	uint32_t reg;
   4173 	struct wm_txqueue *txq;
   4174 	wiseman_txdesc_t *txd;
   4175 	int nexttx;
   4176 	uint32_t rctl;
   4177 
   4178 	/* First, disable MULR fix in FEXTNVM11 */
   4179 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4180 	reg |= FEXTNVM11_DIS_MULRFIX;
   4181 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4182 
   4183 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4184 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4185 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4186 		return;
   4187 
   4188 	/* TX */
   4189 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4190 	    device_xname(sc->sc_dev), preg, reg);
   4191 	reg = CSR_READ(sc, WMREG_TCTL);
   4192 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4193 
   4194 	txq = &sc->sc_queue[0].wmq_txq;
   4195 	nexttx = txq->txq_next;
   4196 	txd = &txq->txq_descs[nexttx];
   4197 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4198 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4199 	txd->wtx_fields.wtxu_status = 0;
   4200 	txd->wtx_fields.wtxu_options = 0;
   4201 	txd->wtx_fields.wtxu_vlan = 0;
   4202 
   4203 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4204 	    BUS_SPACE_BARRIER_WRITE);
   4205 
   4206 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4207 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4208 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4209 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4210 	delay(250);
   4211 
   4212 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4213 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4214 		return;
   4215 
   4216 	/* RX */
   4217 	printf("%s: Need RX flush (reg = %08x)\n",
   4218 	    device_xname(sc->sc_dev), preg);
   4219 	rctl = CSR_READ(sc, WMREG_RCTL);
   4220 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4221 	CSR_WRITE_FLUSH(sc);
   4222 	delay(150);
   4223 
   4224 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4225 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4226 	reg &= 0xffffc000;
   4227 	/*
   4228 	 * update thresholds: prefetch threshold to 31, host threshold
   4229 	 * to 1 and make sure the granularity is "descriptors" and not
   4230 	 * "cache lines"
   4231 	 */
   4232 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4233 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4234 
   4235 	/*
   4236 	 * momentarily enable the RX ring for the changes to take
   4237 	 * effect
   4238 	 */
   4239 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4240 	CSR_WRITE_FLUSH(sc);
   4241 	delay(150);
   4242 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4243 }
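
/*
 * Summary of the flush above: when the hardware reports
 * DESCRING_STATUS_FLUSH_REQ before a reset, a single 512-byte dummy
 * TX descriptor is queued to drain the TX ring, and if the request
 * persists the RX ring is briefly re-enabled with adjusted thresholds
 * to drain the RX side as well.
 */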
   4244 
   4245 /*
   4246  * wm_reset:
   4247  *
   4248  *	Reset the i82542 chip.
   4249  */
   4250 static void
   4251 wm_reset(struct wm_softc *sc)
   4252 {
   4253 	int phy_reset = 0;
   4254 	int i, error = 0;
   4255 	uint32_t reg;
   4256 	uint16_t kmreg;
   4257 	int rv;
   4258 
   4259 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4260 		device_xname(sc->sc_dev), __func__));
   4261 	KASSERT(sc->sc_type != 0);
   4262 
   4263 	/*
   4264 	 * Allocate on-chip memory according to the MTU size.
   4265 	 * The Packet Buffer Allocation register must be written
   4266 	 * before the chip is reset.
   4267 	 */
   4268 	switch (sc->sc_type) {
   4269 	case WM_T_82547:
   4270 	case WM_T_82547_2:
   4271 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4272 		    PBA_22K : PBA_30K;
   4273 		for (i = 0; i < sc->sc_nqueues; i++) {
   4274 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4275 			txq->txq_fifo_head = 0;
   4276 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4277 			txq->txq_fifo_size =
   4278 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4279 			txq->txq_fifo_stall = 0;
   4280 		}
   4281 		break;
   4282 	case WM_T_82571:
   4283 	case WM_T_82572:
    4284 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4285 	case WM_T_80003:
   4286 		sc->sc_pba = PBA_32K;
   4287 		break;
   4288 	case WM_T_82573:
   4289 		sc->sc_pba = PBA_12K;
   4290 		break;
   4291 	case WM_T_82574:
   4292 	case WM_T_82583:
   4293 		sc->sc_pba = PBA_20K;
   4294 		break;
   4295 	case WM_T_82576:
   4296 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4297 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4298 		break;
   4299 	case WM_T_82580:
   4300 	case WM_T_I350:
   4301 	case WM_T_I354:
   4302 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4303 		break;
   4304 	case WM_T_I210:
   4305 	case WM_T_I211:
   4306 		sc->sc_pba = PBA_34K;
   4307 		break;
   4308 	case WM_T_ICH8:
   4309 		/* Workaround for a bit corruption issue in FIFO memory */
   4310 		sc->sc_pba = PBA_8K;
   4311 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4312 		break;
   4313 	case WM_T_ICH9:
   4314 	case WM_T_ICH10:
   4315 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4316 		    PBA_14K : PBA_10K;
   4317 		break;
   4318 	case WM_T_PCH:
   4319 	case WM_T_PCH2:
   4320 	case WM_T_PCH_LPT:
   4321 	case WM_T_PCH_SPT:
   4322 		sc->sc_pba = PBA_26K;
   4323 		break;
   4324 	default:
   4325 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4326 		    PBA_40K : PBA_48K;
   4327 		break;
   4328 	}
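	/*
	 * Example of the 82547 split above: with an MTU > 8192 the RX
	 * packet buffer gets 22KB (PBA_22K), leaving 40KB - 22KB = 18KB
	 * of on-chip packet memory for the TX FIFO.
	 */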
   4329 	/*
    4330 	 * Only old or non-multiqueue devices have the PBA register.
   4331 	 * XXX Need special handling for 82575.
   4332 	 */
   4333 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4334 	    || (sc->sc_type == WM_T_82575))
   4335 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4336 
   4337 	/* Prevent the PCI-E bus from sticking */
   4338 	if (sc->sc_flags & WM_F_PCIE) {
   4339 		int timeout = 800;
   4340 
   4341 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4342 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4343 
   4344 		while (timeout--) {
   4345 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4346 			    == 0)
   4347 				break;
   4348 			delay(100);
   4349 		}
   4350 		if (timeout == 0)
   4351 			device_printf(sc->sc_dev,
   4352 			    "failed to disable busmastering\n");
   4353 	}
   4354 
   4355 	/* Set the completion timeout for interface */
   4356 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4357 	    || (sc->sc_type == WM_T_82580)
   4358 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4359 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4360 		wm_set_pcie_completion_timeout(sc);
   4361 
   4362 	/* Clear interrupt */
   4363 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4364 	if (wm_is_using_msix(sc)) {
   4365 		if (sc->sc_type != WM_T_82574) {
   4366 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4367 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4368 		} else {
   4369 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4370 		}
   4371 	}
   4372 
   4373 	/* Stop the transmit and receive processes. */
   4374 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4375 	sc->sc_rctl &= ~RCTL_EN;
   4376 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4377 	CSR_WRITE_FLUSH(sc);
   4378 
   4379 	/* XXX set_tbi_sbp_82543() */
   4380 
   4381 	delay(10*1000);
   4382 
   4383 	/* Must acquire the MDIO ownership before MAC reset */
   4384 	switch (sc->sc_type) {
   4385 	case WM_T_82573:
   4386 	case WM_T_82574:
   4387 	case WM_T_82583:
   4388 		error = wm_get_hw_semaphore_82573(sc);
   4389 		break;
   4390 	default:
   4391 		break;
   4392 	}
   4393 
   4394 	/*
   4395 	 * 82541 Errata 29? & 82547 Errata 28?
   4396 	 * See also the description about PHY_RST bit in CTRL register
   4397 	 * in 8254x_GBe_SDM.pdf.
   4398 	 */
   4399 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4400 		CSR_WRITE(sc, WMREG_CTRL,
   4401 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4402 		CSR_WRITE_FLUSH(sc);
   4403 		delay(5000);
   4404 	}
   4405 
   4406 	switch (sc->sc_type) {
   4407 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4408 	case WM_T_82541:
   4409 	case WM_T_82541_2:
   4410 	case WM_T_82547:
   4411 	case WM_T_82547_2:
   4412 		/*
   4413 		 * On some chipsets, a reset through a memory-mapped write
   4414 		 * cycle can cause the chip to reset before completing the
   4415 		 * write cycle.  This causes major headache that can be
   4416 		 * avoided by issuing the reset via indirect register writes
   4417 		 * through I/O space.
   4418 		 *
   4419 		 * So, if we successfully mapped the I/O BAR at attach time,
   4420 		 * use that.  Otherwise, try our luck with a memory-mapped
   4421 		 * reset.
   4422 		 */
   4423 		if (sc->sc_flags & WM_F_IOH_VALID)
   4424 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4425 		else
   4426 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4427 		break;
   4428 	case WM_T_82545_3:
   4429 	case WM_T_82546_3:
   4430 		/* Use the shadow control register on these chips. */
   4431 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4432 		break;
   4433 	case WM_T_80003:
   4434 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4435 		sc->phy.acquire(sc);
   4436 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4437 		sc->phy.release(sc);
   4438 		break;
   4439 	case WM_T_ICH8:
   4440 	case WM_T_ICH9:
   4441 	case WM_T_ICH10:
   4442 	case WM_T_PCH:
   4443 	case WM_T_PCH2:
   4444 	case WM_T_PCH_LPT:
   4445 	case WM_T_PCH_SPT:
   4446 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4447 		if (wm_phy_resetisblocked(sc) == false) {
   4448 			/*
   4449 			 * Gate automatic PHY configuration by hardware on
   4450 			 * non-managed 82579
   4451 			 */
   4452 			if ((sc->sc_type == WM_T_PCH2)
   4453 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4454 				== 0))
   4455 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4456 
   4457 			reg |= CTRL_PHY_RESET;
   4458 			phy_reset = 1;
   4459 		} else
   4460 			printf("XXX reset is blocked!!!\n");
   4461 		sc->phy.acquire(sc);
   4462 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4463 		/* Don't insert a completion barrier during reset */
   4464 		delay(20*1000);
   4465 		mutex_exit(sc->sc_ich_phymtx);
   4466 		break;
   4467 	case WM_T_82580:
   4468 	case WM_T_I350:
   4469 	case WM_T_I354:
   4470 	case WM_T_I210:
   4471 	case WM_T_I211:
   4472 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4473 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4474 			CSR_WRITE_FLUSH(sc);
   4475 		delay(5000);
   4476 		break;
   4477 	case WM_T_82542_2_0:
   4478 	case WM_T_82542_2_1:
   4479 	case WM_T_82543:
   4480 	case WM_T_82540:
   4481 	case WM_T_82545:
   4482 	case WM_T_82546:
   4483 	case WM_T_82571:
   4484 	case WM_T_82572:
   4485 	case WM_T_82573:
   4486 	case WM_T_82574:
   4487 	case WM_T_82575:
   4488 	case WM_T_82576:
   4489 	case WM_T_82583:
   4490 	default:
   4491 		/* Everything else can safely use the documented method. */
   4492 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4493 		break;
   4494 	}
   4495 
   4496 	/* Must release the MDIO ownership after MAC reset */
   4497 	switch (sc->sc_type) {
   4498 	case WM_T_82573:
   4499 	case WM_T_82574:
   4500 	case WM_T_82583:
   4501 		if (error == 0)
   4502 			wm_put_hw_semaphore_82573(sc);
   4503 		break;
   4504 	default:
   4505 		break;
   4506 	}
   4507 
   4508 	if (phy_reset != 0)
   4509 		wm_get_cfg_done(sc);
   4510 
   4511 	/* reload EEPROM */
   4512 	switch (sc->sc_type) {
   4513 	case WM_T_82542_2_0:
   4514 	case WM_T_82542_2_1:
   4515 	case WM_T_82543:
   4516 	case WM_T_82544:
   4517 		delay(10);
   4518 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4519 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4520 		CSR_WRITE_FLUSH(sc);
   4521 		delay(2000);
   4522 		break;
   4523 	case WM_T_82540:
   4524 	case WM_T_82545:
   4525 	case WM_T_82545_3:
   4526 	case WM_T_82546:
   4527 	case WM_T_82546_3:
   4528 		delay(5*1000);
   4529 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4530 		break;
   4531 	case WM_T_82541:
   4532 	case WM_T_82541_2:
   4533 	case WM_T_82547:
   4534 	case WM_T_82547_2:
   4535 		delay(20000);
   4536 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4537 		break;
   4538 	case WM_T_82571:
   4539 	case WM_T_82572:
   4540 	case WM_T_82573:
   4541 	case WM_T_82574:
   4542 	case WM_T_82583:
   4543 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4544 			delay(10);
   4545 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4546 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4547 			CSR_WRITE_FLUSH(sc);
   4548 		}
   4549 		/* check EECD_EE_AUTORD */
   4550 		wm_get_auto_rd_done(sc);
   4551 		/*
    4552 		 * PHY configuration from the NVM starts only after
    4553 		 * EECD_AUTO_RD is set.
   4554 		 */
   4555 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4556 		    || (sc->sc_type == WM_T_82583))
   4557 			delay(25*1000);
   4558 		break;
   4559 	case WM_T_82575:
   4560 	case WM_T_82576:
   4561 	case WM_T_82580:
   4562 	case WM_T_I350:
   4563 	case WM_T_I354:
   4564 	case WM_T_I210:
   4565 	case WM_T_I211:
   4566 	case WM_T_80003:
   4567 		/* check EECD_EE_AUTORD */
   4568 		wm_get_auto_rd_done(sc);
   4569 		break;
   4570 	case WM_T_ICH8:
   4571 	case WM_T_ICH9:
   4572 	case WM_T_ICH10:
   4573 	case WM_T_PCH:
   4574 	case WM_T_PCH2:
   4575 	case WM_T_PCH_LPT:
   4576 	case WM_T_PCH_SPT:
   4577 		break;
   4578 	default:
   4579 		panic("%s: unknown type\n", __func__);
   4580 	}
   4581 
   4582 	/* Check whether EEPROM is present or not */
   4583 	switch (sc->sc_type) {
   4584 	case WM_T_82575:
   4585 	case WM_T_82576:
   4586 	case WM_T_82580:
   4587 	case WM_T_I350:
   4588 	case WM_T_I354:
   4589 	case WM_T_ICH8:
   4590 	case WM_T_ICH9:
   4591 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4592 			/* Not found */
   4593 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4594 			if (sc->sc_type == WM_T_82575)
   4595 				wm_reset_init_script_82575(sc);
   4596 		}
   4597 		break;
   4598 	default:
   4599 		break;
   4600 	}
   4601 
   4602 	if (phy_reset != 0)
   4603 		wm_phy_post_reset(sc);
   4604 
   4605 	if ((sc->sc_type == WM_T_82580)
   4606 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4607 		/* clear global device reset status bit */
   4608 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4609 	}
   4610 
   4611 	/* Clear any pending interrupt events. */
   4612 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4613 	reg = CSR_READ(sc, WMREG_ICR);
   4614 	if (wm_is_using_msix(sc)) {
   4615 		if (sc->sc_type != WM_T_82574) {
   4616 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4617 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4618 		} else
   4619 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4620 	}
   4621 
   4622 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4623 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4624 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4625 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4626 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4627 		reg |= KABGTXD_BGSQLBIAS;
   4628 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4629 	}
   4630 
   4631 	/* reload sc_ctrl */
   4632 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4633 
   4634 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4635 		wm_set_eee_i350(sc);
   4636 
   4637 	/*
   4638 	 * For PCH, this write will make sure that any noise will be detected
   4639 	 * as a CRC error and be dropped rather than show up as a bad packet
    4640 	 * to the DMA engine.
   4641 	 */
   4642 	if (sc->sc_type == WM_T_PCH)
   4643 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4644 
   4645 	if (sc->sc_type >= WM_T_82544)
   4646 		CSR_WRITE(sc, WMREG_WUC, 0);
   4647 
   4648 	wm_reset_mdicnfg_82580(sc);
   4649 
   4650 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4651 		wm_pll_workaround_i210(sc);
   4652 
   4653 	if (sc->sc_type == WM_T_80003) {
   4654 		/* default to TRUE to enable the MDIC W/A */
   4655 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4656 
   4657 		rv = wm_kmrn_readreg(sc,
   4658 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4659 		if (rv == 0) {
   4660 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4661 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4662 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4663 			else
   4664 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4665 		}
   4666 	}
   4667 }
   4668 
   4669 /*
   4670  * wm_add_rxbuf:
   4671  *
    4672  *	Add a receive buffer to the indicated descriptor.
   4673  */
   4674 static int
   4675 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4676 {
   4677 	struct wm_softc *sc = rxq->rxq_sc;
   4678 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4679 	struct mbuf *m;
   4680 	int error;
   4681 
   4682 	KASSERT(mutex_owned(rxq->rxq_lock));
   4683 
   4684 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4685 	if (m == NULL)
   4686 		return ENOBUFS;
   4687 
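	/*
	 * MCLGET attaches a cluster to the mbuf; it signals failure by
	 * leaving M_EXT clear rather than by returning an error, hence
	 * the flag check below.
	 */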
   4688 	MCLGET(m, M_DONTWAIT);
   4689 	if ((m->m_flags & M_EXT) == 0) {
   4690 		m_freem(m);
   4691 		return ENOBUFS;
   4692 	}
   4693 
   4694 	if (rxs->rxs_mbuf != NULL)
   4695 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4696 
   4697 	rxs->rxs_mbuf = m;
   4698 
   4699 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4700 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4701 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4702 	if (error) {
   4703 		/* XXX XXX XXX */
   4704 		aprint_error_dev(sc->sc_dev,
   4705 		    "unable to load rx DMA map %d, error = %d\n",
   4706 		    idx, error);
   4707 		panic("wm_add_rxbuf");
   4708 	}
   4709 
   4710 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4711 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4712 
   4713 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4714 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4715 			wm_init_rxdesc(rxq, idx);
   4716 	} else
   4717 		wm_init_rxdesc(rxq, idx);
   4718 
   4719 	return 0;
   4720 }
   4721 
   4722 /*
   4723  * wm_rxdrain:
   4724  *
   4725  *	Drain the receive queue.
   4726  */
   4727 static void
   4728 wm_rxdrain(struct wm_rxqueue *rxq)
   4729 {
   4730 	struct wm_softc *sc = rxq->rxq_sc;
   4731 	struct wm_rxsoft *rxs;
   4732 	int i;
   4733 
   4734 	KASSERT(mutex_owned(rxq->rxq_lock));
   4735 
   4736 	for (i = 0; i < WM_NRXDESC; i++) {
   4737 		rxs = &rxq->rxq_soft[i];
   4738 		if (rxs->rxs_mbuf != NULL) {
   4739 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4740 			m_freem(rxs->rxs_mbuf);
   4741 			rxs->rxs_mbuf = NULL;
   4742 		}
   4743 	}
   4744 }
   4745 
   4746 
   4747 /*
   4748  * XXX copy from FreeBSD's sys/net/rss_config.c
   4749  */
   4750 /*
   4751  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4752  * effectiveness may be limited by algorithm choice and available entropy
   4753  * during the boot.
   4754  *
   4755  * XXXRW: And that we don't randomize it yet!
   4756  *
   4757  * This is the default Microsoft RSS specification key which is also
   4758  * the Chelsio T5 firmware default key.
   4759  */
   4760 #define RSS_KEYSIZE 40
   4761 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4762 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4763 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4764 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4765 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4766 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4767 };
   4768 
   4769 /*
    4770  * Caller must pass an array of size sizeof(wm_rss_key).
   4771  *
   4772  * XXX
    4773  * As if_ixgbe may use this function, it should not be
    4774  * an if_wm-specific function.
   4775  */
   4776 static void
   4777 wm_rss_getkey(uint8_t *key)
   4778 {
   4779 
   4780 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4781 }
   4782 
   4783 /*
    4784  * Set up registers for RSS.
    4785  *
    4786  * XXX VMDq is not supported yet.
   4787  */
   4788 static void
   4789 wm_init_rss(struct wm_softc *sc)
   4790 {
   4791 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4792 	int i;
   4793 
   4794 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4795 
   4796 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4797 		int qid, reta_ent;
   4798 
   4799 		qid  = i % sc->sc_nqueues;
   4800 		switch(sc->sc_type) {
   4801 		case WM_T_82574:
   4802 			reta_ent = __SHIFTIN(qid,
   4803 			    RETA_ENT_QINDEX_MASK_82574);
   4804 			break;
   4805 		case WM_T_82575:
   4806 			reta_ent = __SHIFTIN(qid,
   4807 			    RETA_ENT_QINDEX1_MASK_82575);
   4808 			break;
   4809 		default:
   4810 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4811 			break;
   4812 		}
   4813 
   4814 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4815 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4816 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4817 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4818 	}
   4819 
   4820 	wm_rss_getkey((uint8_t *)rss_key);
   4821 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4822 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4823 
   4824 	if (sc->sc_type == WM_T_82574)
   4825 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4826 	else
   4827 		mrqc = MRQC_ENABLE_RSS_MQ;
   4828 
   4829 	/*
   4830 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4831 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4832 	 */
   4833 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4834 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4835 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4836 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4837 
   4838 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4839 }
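
/*
 * Illustrative sketch (not compiled; names are hypothetical): how the
 * hardware consumes the RSS key and redirection table programmed in
 * wm_init_rss().  A Toeplitz hash is computed over the packet's n-tuple
 * (e.g. 12 bytes of IPv4 source/destination addresses and ports) using
 * wm_rss_key as the bit string; the low bits of the hash then index the
 * RETA to select an Rx queue.
 */
#if 0
static uint32_t
wm_toeplitz_hash(const uint8_t *data, size_t datalen)
{
	uint32_t hash = 0, window;
	size_t i;
	int b;

	/* Leftmost 32 bits of the 40-byte key. */
	window = ((uint32_t)wm_rss_key[0] << 24) | (wm_rss_key[1] << 16)
	    | (wm_rss_key[2] << 8) | wm_rss_key[3];

	/* Requires datalen + 4 <= sizeof(wm_rss_key). */
	for (i = 0; i < datalen; i++) {
		for (b = 7; b >= 0; b--) {
			/* XOR in the current key window for each set bit. */
			if (data[i] & (1 << b))
				hash ^= window;
			/* Slide the 32-bit key window left by one bit. */
			window <<= 1;
			if (wm_rss_key[i + 4] & (1 << b))
				window |= 1;
		}
	}
	return hash;
}
/* An Rx queue is then selected roughly as RETA[hash % RETA_NUM_ENTRIES]. */
#endif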
   4840 
   4841 /*
    4842  * Adjust the numbers of TX and RX queues which the system actually uses.
    4843  *
    4844  * The numbers are affected by the following parameters:
    4845  *     - The number of hardware queues
   4846  *     - The number of MSI-X vectors (= "nvectors" argument)
   4847  *     - ncpu
   4848  */
   4849 static void
   4850 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4851 {
   4852 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4853 
   4854 	if (nvectors < 2) {
   4855 		sc->sc_nqueues = 1;
   4856 		return;
   4857 	}
   4858 
   4859 	switch(sc->sc_type) {
   4860 	case WM_T_82572:
   4861 		hw_ntxqueues = 2;
   4862 		hw_nrxqueues = 2;
   4863 		break;
   4864 	case WM_T_82574:
   4865 		hw_ntxqueues = 2;
   4866 		hw_nrxqueues = 2;
   4867 		break;
   4868 	case WM_T_82575:
   4869 		hw_ntxqueues = 4;
   4870 		hw_nrxqueues = 4;
   4871 		break;
   4872 	case WM_T_82576:
   4873 		hw_ntxqueues = 16;
   4874 		hw_nrxqueues = 16;
   4875 		break;
   4876 	case WM_T_82580:
   4877 	case WM_T_I350:
   4878 	case WM_T_I354:
   4879 		hw_ntxqueues = 8;
   4880 		hw_nrxqueues = 8;
   4881 		break;
   4882 	case WM_T_I210:
   4883 		hw_ntxqueues = 4;
   4884 		hw_nrxqueues = 4;
   4885 		break;
   4886 	case WM_T_I211:
   4887 		hw_ntxqueues = 2;
   4888 		hw_nrxqueues = 2;
   4889 		break;
   4890 		/*
    4891 		 * As the following Ethernet controllers do not support
    4892 		 * MSI-X, this driver does not use multiple queues on them:
   4893 		 *     - WM_T_80003
   4894 		 *     - WM_T_ICH8
   4895 		 *     - WM_T_ICH9
   4896 		 *     - WM_T_ICH10
   4897 		 *     - WM_T_PCH
   4898 		 *     - WM_T_PCH2
   4899 		 *     - WM_T_PCH_LPT
   4900 		 */
   4901 	default:
   4902 		hw_ntxqueues = 1;
   4903 		hw_nrxqueues = 1;
   4904 		break;
   4905 	}
   4906 
   4907 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4908 
   4909 	/*
    4910 	 * Using more queues than MSI-X vectors cannot improve scaling, so
    4911 	 * limit the number of queues actually used.
   4912 	 */
   4913 	if (nvectors < hw_nqueues + 1) {
   4914 		sc->sc_nqueues = nvectors - 1;
   4915 	} else {
   4916 		sc->sc_nqueues = hw_nqueues;
   4917 	}
   4918 
   4919 	/*
    4920 	 * Using more queues than CPUs cannot improve scaling either, so
    4921 	 * limit the number of queues to ncpu.
   4922 	 */
   4923 	if (ncpu < sc->sc_nqueues)
   4924 		sc->sc_nqueues = ncpu;
   4925 }
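
/*
 * Worked example of the clamping in wm_adjust_qnum() (values chosen for
 * illustration): an 82576 exposes 16 Tx and 16 Rx hardware queues, so
 * hw_nqueues = min(16, 16) = 16.  With nvectors = 5 MSI-X vectors, one
 * of which is reserved for the link interrupt, sc_nqueues becomes
 * nvectors - 1 = 4; on a machine with ncpu = 4 the CPU clamp then
 * leaves sc_nqueues = 4.
 */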
   4926 
   4927 static inline bool
   4928 wm_is_using_msix(struct wm_softc *sc)
   4929 {
   4930 
   4931 	return (sc->sc_nintrs > 1);
   4932 }
   4933 
   4934 static inline bool
   4935 wm_is_using_multiqueue(struct wm_softc *sc)
   4936 {
   4937 
   4938 	return (sc->sc_nqueues > 1);
   4939 }
   4940 
   4941 static int
   4942 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4943 {
   4944 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4945 	wmq->wmq_id = qidx;
   4946 	wmq->wmq_intr_idx = intr_idx;
   4947 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4948 #ifdef WM_MPSAFE
   4949 	    | SOFTINT_MPSAFE
   4950 #endif
   4951 	    , wm_handle_queue, wmq);
   4952 	if (wmq->wmq_si != NULL)
   4953 		return 0;
   4954 
   4955 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4956 	    wmq->wmq_id);
   4957 
   4958 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4959 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4960 	return ENOMEM;
   4961 }
   4962 
   4963 /*
   4964  * Both single interrupt MSI and INTx can use this function.
   4965  */
   4966 static int
   4967 wm_setup_legacy(struct wm_softc *sc)
   4968 {
   4969 	pci_chipset_tag_t pc = sc->sc_pc;
   4970 	const char *intrstr = NULL;
   4971 	char intrbuf[PCI_INTRSTR_LEN];
   4972 	int error;
   4973 
   4974 	error = wm_alloc_txrx_queues(sc);
   4975 	if (error) {
   4976 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4977 		    error);
   4978 		return ENOMEM;
   4979 	}
   4980 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4981 	    sizeof(intrbuf));
   4982 #ifdef WM_MPSAFE
   4983 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4984 #endif
   4985 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4986 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4987 	if (sc->sc_ihs[0] == NULL) {
   4988 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4989 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4990 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4991 		return ENOMEM;
   4992 	}
   4993 
   4994 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4995 	sc->sc_nintrs = 1;
   4996 
   4997 	return wm_softint_establish(sc, 0, 0);
   4998 }
   4999 
   5000 static int
   5001 wm_setup_msix(struct wm_softc *sc)
   5002 {
   5003 	void *vih;
   5004 	kcpuset_t *affinity;
   5005 	int qidx, error, intr_idx, txrx_established;
   5006 	pci_chipset_tag_t pc = sc->sc_pc;
   5007 	const char *intrstr = NULL;
   5008 	char intrbuf[PCI_INTRSTR_LEN];
   5009 	char intr_xname[INTRDEVNAMEBUF];
   5010 
   5011 	if (sc->sc_nqueues < ncpu) {
   5012 		/*
    5013 		 * To avoid contention with other devices' interrupts, the
    5014 		 * affinity of Tx/Rx interrupts starts at CPU#1.
   5015 		 */
   5016 		sc->sc_affinity_offset = 1;
   5017 	} else {
   5018 		/*
    5019 		 * In this case, this device uses all CPUs, so for readability
    5020 		 * we unify the affinity cpu_index with the MSI-X vector number.
   5021 		 */
   5022 		sc->sc_affinity_offset = 0;
   5023 	}
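
	/*
	 * Example of the offset above: with 4 queues on an 8-CPU machine,
	 * sc_affinity_offset is 1 and the loop below binds the queue
	 * vectors round-robin to CPU#1..CPU#4, leaving CPU#0 free for
	 * other devices' interrupts; the link vector keeps its default
	 * affinity.
	 */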
   5024 
   5025 	error = wm_alloc_txrx_queues(sc);
   5026 	if (error) {
   5027 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5028 		    error);
   5029 		return ENOMEM;
   5030 	}
   5031 
   5032 	kcpuset_create(&affinity, false);
   5033 	intr_idx = 0;
   5034 
   5035 	/*
   5036 	 * TX and RX
   5037 	 */
   5038 	txrx_established = 0;
   5039 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5040 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5041 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5042 
   5043 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5044 		    sizeof(intrbuf));
   5045 #ifdef WM_MPSAFE
   5046 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5047 		    PCI_INTR_MPSAFE, true);
   5048 #endif
   5049 		memset(intr_xname, 0, sizeof(intr_xname));
   5050 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5051 		    device_xname(sc->sc_dev), qidx);
   5052 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5053 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5054 		if (vih == NULL) {
   5055 			aprint_error_dev(sc->sc_dev,
   5056 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5057 			    intrstr ? " at " : "",
   5058 			    intrstr ? intrstr : "");
   5059 
   5060 			goto fail;
   5061 		}
   5062 		kcpuset_zero(affinity);
   5063 		/* Round-robin affinity */
   5064 		kcpuset_set(affinity, affinity_to);
   5065 		error = interrupt_distribute(vih, affinity, NULL);
   5066 		if (error == 0) {
   5067 			aprint_normal_dev(sc->sc_dev,
   5068 			    "for TX and RX interrupting at %s affinity to %u\n",
   5069 			    intrstr, affinity_to);
   5070 		} else {
   5071 			aprint_normal_dev(sc->sc_dev,
   5072 			    "for TX and RX interrupting at %s\n", intrstr);
   5073 		}
   5074 		sc->sc_ihs[intr_idx] = vih;
   5075 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5076 			goto fail;
   5077 		txrx_established++;
   5078 		intr_idx++;
   5079 	}
   5080 
   5081 	/*
   5082 	 * LINK
   5083 	 */
   5084 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5085 	    sizeof(intrbuf));
   5086 #ifdef WM_MPSAFE
   5087 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5088 #endif
   5089 	memset(intr_xname, 0, sizeof(intr_xname));
   5090 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5091 	    device_xname(sc->sc_dev));
   5092 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5093 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5094 	if (vih == NULL) {
   5095 		aprint_error_dev(sc->sc_dev,
   5096 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5097 		    intrstr ? " at " : "",
   5098 		    intrstr ? intrstr : "");
   5099 
   5100 		goto fail;
   5101 	}
    5102 	/* Keep the default affinity for the LINK interrupt */
   5103 	aprint_normal_dev(sc->sc_dev,
   5104 	    "for LINK interrupting at %s\n", intrstr);
   5105 	sc->sc_ihs[intr_idx] = vih;
   5106 	sc->sc_link_intr_idx = intr_idx;
   5107 
   5108 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5109 	kcpuset_destroy(affinity);
   5110 	return 0;
   5111 
   5112  fail:
   5113 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5114 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5115 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5116 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5117 	}
   5118 
   5119 	kcpuset_destroy(affinity);
   5120 	return ENOMEM;
   5121 }
   5122 
   5123 static void
   5124 wm_turnon(struct wm_softc *sc)
   5125 {
   5126 	int i;
   5127 
   5128 	KASSERT(WM_CORE_LOCKED(sc));
   5129 
   5130 	/*
    5131 	 * The stopping flags must be unset in ascending order.
   5132 	 */
   5133 	for(i = 0; i < sc->sc_nqueues; i++) {
   5134 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5135 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5136 
   5137 		mutex_enter(txq->txq_lock);
   5138 		txq->txq_stopping = false;
   5139 		mutex_exit(txq->txq_lock);
   5140 
   5141 		mutex_enter(rxq->rxq_lock);
   5142 		rxq->rxq_stopping = false;
   5143 		mutex_exit(rxq->rxq_lock);
   5144 	}
   5145 
   5146 	sc->sc_core_stopping = false;
   5147 }
   5148 
   5149 static void
   5150 wm_turnoff(struct wm_softc *sc)
   5151 {
   5152 	int i;
   5153 
   5154 	KASSERT(WM_CORE_LOCKED(sc));
   5155 
   5156 	sc->sc_core_stopping = true;
   5157 
   5158 	/*
    5159 	 * The stopping flags must be set in ascending order.
   5160 	 */
   5161 	for(i = 0; i < sc->sc_nqueues; i++) {
   5162 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5163 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5164 
   5165 		mutex_enter(rxq->rxq_lock);
   5166 		rxq->rxq_stopping = true;
   5167 		mutex_exit(rxq->rxq_lock);
   5168 
   5169 		mutex_enter(txq->txq_lock);
   5170 		txq->txq_stopping = true;
   5171 		mutex_exit(txq->txq_lock);
   5172 	}
   5173 }
   5174 
   5175 /*
    5176  * Write the interrupt interval value to the ITR or EITR register.
   5177  */
   5178 static void
   5179 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5180 {
   5181 
   5182 	if (!wmq->wmq_set_itr)
   5183 		return;
   5184 
   5185 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5186 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5187 
   5188 		/*
    5189 		 * The 82575 doesn't have the CNT_INGR field,
    5190 		 * so overwrite the counter field in software.
   5191 		 */
   5192 		if (sc->sc_type == WM_T_82575)
   5193 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5194 		else
   5195 			eitr |= EITR_CNT_INGR;
   5196 
   5197 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5198 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5199 		/*
    5200 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5201 		 * the multiqueue function with MSI-X.
   5202 		 */
   5203 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5204 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5205 	} else {
   5206 		KASSERT(wmq->wmq_id == 0);
   5207 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5208 	}
   5209 
   5210 	wmq->wmq_set_itr = false;
   5211 }
   5212 
   5213 /*
   5214  * TODO
    5215  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5216  * however, it does not fit wm(4) well, so AIM remains disabled until
    5217  * we find an appropriate ITR calculation for wm(4).
   5218  */
   5219 /*
    5220  * Calculate the interrupt interval value to be written by
    5221  * wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5222  */
   5223 static void
   5224 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5225 {
   5226 #ifdef NOTYET
   5227 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5228 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5229 	uint32_t avg_size = 0;
   5230 	uint32_t new_itr;
   5231 
   5232 	if (rxq->rxq_packets)
   5233 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5234 	if (txq->txq_packets)
   5235 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5236 
   5237 	if (avg_size == 0) {
   5238 		new_itr = 450; /* restore default value */
   5239 		goto out;
   5240 	}
   5241 
   5242 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5243 	avg_size += 24;
   5244 
   5245 	/* Don't starve jumbo frames */
   5246 	avg_size = min(avg_size, 3000);
   5247 
   5248 	/* Give a little boost to mid-size frames */
   5249 	if ((avg_size > 300) && (avg_size < 1200))
   5250 		new_itr = avg_size / 3;
   5251 	else
   5252 		new_itr = avg_size / 2;
   5253 
   5254 out:
   5255 	/*
    5256 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5257 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5258 	 */
   5259 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5260 		new_itr *= 4;
   5261 
   5262 	if (new_itr != wmq->wmq_itr) {
   5263 		wmq->wmq_itr = new_itr;
   5264 		wmq->wmq_set_itr = true;
   5265 	} else
   5266 		wmq->wmq_set_itr = false;
   5267 
   5268 	rxq->rxq_packets = 0;
   5269 	rxq->rxq_bytes = 0;
   5270 	txq->txq_packets = 0;
   5271 	txq->txq_bytes = 0;
   5272 #endif
   5273 }
   5274 
   5275 /*
   5276  * wm_init:		[ifnet interface function]
   5277  *
   5278  *	Initialize the interface.
   5279  */
   5280 static int
   5281 wm_init(struct ifnet *ifp)
   5282 {
   5283 	struct wm_softc *sc = ifp->if_softc;
   5284 	int ret;
   5285 
   5286 	WM_CORE_LOCK(sc);
   5287 	ret = wm_init_locked(ifp);
   5288 	WM_CORE_UNLOCK(sc);
   5289 
   5290 	return ret;
   5291 }
   5292 
   5293 static int
   5294 wm_init_locked(struct ifnet *ifp)
   5295 {
   5296 	struct wm_softc *sc = ifp->if_softc;
   5297 	int i, j, trynum, error = 0;
   5298 	uint32_t reg;
   5299 
   5300 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5301 		device_xname(sc->sc_dev), __func__));
   5302 	KASSERT(WM_CORE_LOCKED(sc));
   5303 
   5304 	/*
    5305 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5306 	 * There is a small but measurable benefit to avoiding the adjustment
   5307 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5308 	 * on such platforms.  One possibility is that the DMA itself is
   5309 	 * slightly more efficient if the front of the entire packet (instead
   5310 	 * of the front of the headers) is aligned.
   5311 	 *
   5312 	 * Note we must always set align_tweak to 0 if we are using
   5313 	 * jumbo frames.
   5314 	 */
   5315 #ifdef __NO_STRICT_ALIGNMENT
   5316 	sc->sc_align_tweak = 0;
   5317 #else
   5318 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5319 		sc->sc_align_tweak = 0;
   5320 	else
   5321 		sc->sc_align_tweak = 2;
   5322 #endif /* __NO_STRICT_ALIGNMENT */
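
	/*
	 * Example of the tweak above: with a normal 1500-byte MTU, the
	 * largest frame (1500 + 14 + 4 = 1518 bytes) fits in a 2048-byte
	 * cluster, so align_tweak is 2.  Offsetting the buffer by two
	 * bytes places the IP header, which follows the 14-byte Ethernet
	 * header, on a 4-byte boundary.
	 */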
   5323 
   5324 	/* Cancel any pending I/O. */
   5325 	wm_stop_locked(ifp, 0);
   5326 
   5327 	/* update statistics before reset */
   5328 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5329 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5330 
   5331 	/* PCH_SPT hardware workaround */
   5332 	if (sc->sc_type == WM_T_PCH_SPT)
   5333 		wm_flush_desc_rings(sc);
   5334 
   5335 	/* Reset the chip to a known state. */
   5336 	wm_reset(sc);
   5337 
   5338 	/*
    5339 	 * AMT-based hardware can now take control from the firmware.
    5340 	 * Do this after reset.
   5341 	 */
   5342 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5343 		wm_get_hw_control(sc);
   5344 
   5345 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5346 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5347 		wm_legacy_irq_quirk_spt(sc);
   5348 
   5349 	/* Init hardware bits */
   5350 	wm_initialize_hardware_bits(sc);
   5351 
   5352 	/* Reset the PHY. */
   5353 	if (sc->sc_flags & WM_F_HAS_MII)
   5354 		wm_gmii_reset(sc);
   5355 
   5356 	/* Calculate (E)ITR value */
   5357 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5358 		/*
   5359 		 * For NEWQUEUE's EITR (except for 82575).
    5360 		 * The 82575's EITR should be set to the same throttling value
    5361 		 * as the other old controllers' ITR because the interrupt/sec
    5362 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5363 		 *
    5364 		 * The 82574's EITR should be set to the same throttling value
    5365 		 * as its ITR.
    5366 		 *
    5367 		 * For N interrupts/sec, set this value to:
    5368 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5368 		 */
   5369 		sc->sc_itr_init = 450;
   5370 	} else if (sc->sc_type >= WM_T_82543) {
   5371 		/*
   5372 		 * Set up the interrupt throttling register (units of 256ns)
   5373 		 * Note that a footnote in Intel's documentation says this
   5374 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5375 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5376 		 * that that is also true for the 1024ns units of the other
   5377 		 * interrupt-related timer registers -- so, really, we ought
   5378 		 * to divide this value by 4 when the link speed is low.
   5379 		 *
   5380 		 * XXX implement this division at link speed change!
   5381 		 */
   5382 
   5383 		/*
   5384 		 * For N interrupts/sec, set this value to:
   5385 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5386 		 * absolute and packet timer values to this value
   5387 		 * divided by 4 to get "simple timer" behavior.
   5388 		 */
   5389 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5390 	}
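
	/*
	 * Worked examples of the formulas above: sc_itr_init = 1500 in
	 * 256ns ITR units gives 1,000,000,000 / (1500 * 256) ~= 2604
	 * interrupts/sec, while sc_itr_init = 450 in the NEWQUEUE EITR
	 * units gives 1,000,000 / 450 ~= 2222 interrupts/sec.
	 */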
   5391 
   5392 	error = wm_init_txrx_queues(sc);
   5393 	if (error)
   5394 		goto out;
   5395 
   5396 	/*
   5397 	 * Clear out the VLAN table -- we don't use it (yet).
   5398 	 */
   5399 	CSR_WRITE(sc, WMREG_VET, 0);
   5400 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5401 		trynum = 10; /* Due to hw errata */
   5402 	else
   5403 		trynum = 1;
   5404 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5405 		for (j = 0; j < trynum; j++)
   5406 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5407 
   5408 	/*
   5409 	 * Set up flow-control parameters.
   5410 	 *
   5411 	 * XXX Values could probably stand some tuning.
   5412 	 */
   5413 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5414 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5415 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5416 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5417 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5418 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5419 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5420 	}
   5421 
   5422 	sc->sc_fcrtl = FCRTL_DFLT;
   5423 	if (sc->sc_type < WM_T_82543) {
   5424 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5425 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5426 	} else {
   5427 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5428 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5429 	}
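
	/*
	 * FCRTH and FCRTL are the receive FIFO high and low water marks:
	 * the MAC sends an XOFF pause frame when the FIFO fills past the
	 * high mark and an XON once it drains below the low mark.
	 */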
   5430 
   5431 	if (sc->sc_type == WM_T_80003)
   5432 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5433 	else
   5434 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5435 
   5436 	/* Writes the control register. */
   5437 	wm_set_vlan(sc);
   5438 
   5439 	if (sc->sc_flags & WM_F_HAS_MII) {
   5440 		uint16_t kmreg;
   5441 
   5442 		switch (sc->sc_type) {
   5443 		case WM_T_80003:
   5444 		case WM_T_ICH8:
   5445 		case WM_T_ICH9:
   5446 		case WM_T_ICH10:
   5447 		case WM_T_PCH:
   5448 		case WM_T_PCH2:
   5449 		case WM_T_PCH_LPT:
   5450 		case WM_T_PCH_SPT:
   5451 			/*
   5452 			 * Set the mac to wait the maximum time between each
   5453 			 * iteration and increase the max iterations when
   5454 			 * polling the phy; this fixes erroneous timeouts at
   5455 			 * 10Mbps.
   5456 			 */
   5457 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5458 			    0xFFFF);
   5459 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5460 			    &kmreg);
   5461 			kmreg |= 0x3F;
   5462 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5463 			    kmreg);
   5464 			break;
   5465 		default:
   5466 			break;
   5467 		}
   5468 
   5469 		if (sc->sc_type == WM_T_80003) {
   5470 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5471 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5472 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5473 
    5474 			/* Bypass the RX and TX FIFOs */
   5475 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5476 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5477 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5478 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5479 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5480 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5481 		}
   5482 	}
   5483 #if 0
   5484 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5485 #endif
   5486 
   5487 	/* Set up checksum offload parameters. */
   5488 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5489 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5490 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5491 		reg |= RXCSUM_IPOFL;
   5492 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5493 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5494 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5495 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5496 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5497 
    5498 	/* Set up MSI-X related registers */
   5499 	if (wm_is_using_msix(sc)) {
   5500 		uint32_t ivar;
   5501 		struct wm_queue *wmq;
   5502 		int qid, qintr_idx;
   5503 
   5504 		if (sc->sc_type == WM_T_82575) {
   5505 			/* Interrupt control */
   5506 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5507 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5508 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5509 
   5510 			/* TX and RX */
   5511 			for (i = 0; i < sc->sc_nqueues; i++) {
   5512 				wmq = &sc->sc_queue[i];
   5513 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5514 				    EITR_TX_QUEUE(wmq->wmq_id)
   5515 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5516 			}
   5517 			/* Link status */
   5518 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5519 			    EITR_OTHER);
   5520 		} else if (sc->sc_type == WM_T_82574) {
   5521 			/* Interrupt control */
   5522 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5523 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5524 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5525 
   5526 			/*
    5527 			 * Work around an issue with spurious interrupts in
    5528 			 * MSI-X mode.  At wm_initialize_hardware_bits() time,
    5529 			 * sc_nintrs has not been initialized yet, so
    5530 			 * re-initialize WMREG_RFCTL here.
   5531 			 */
   5532 			reg = CSR_READ(sc, WMREG_RFCTL);
   5533 			reg |= WMREG_RFCTL_ACKDIS;
   5534 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5535 
   5536 			ivar = 0;
   5537 			/* TX and RX */
   5538 			for (i = 0; i < sc->sc_nqueues; i++) {
   5539 				wmq = &sc->sc_queue[i];
   5540 				qid = wmq->wmq_id;
   5541 				qintr_idx = wmq->wmq_intr_idx;
   5542 
   5543 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5544 				    IVAR_TX_MASK_Q_82574(qid));
   5545 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5546 				    IVAR_RX_MASK_Q_82574(qid));
   5547 			}
   5548 			/* Link status */
   5549 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5550 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5551 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5552 		} else {
   5553 			/* Interrupt control */
   5554 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5555 			    | GPIE_EIAME | GPIE_PBA);
   5556 
   5557 			switch (sc->sc_type) {
   5558 			case WM_T_82580:
   5559 			case WM_T_I350:
   5560 			case WM_T_I354:
   5561 			case WM_T_I210:
   5562 			case WM_T_I211:
   5563 				/* TX and RX */
   5564 				for (i = 0; i < sc->sc_nqueues; i++) {
   5565 					wmq = &sc->sc_queue[i];
   5566 					qid = wmq->wmq_id;
   5567 					qintr_idx = wmq->wmq_intr_idx;
   5568 
   5569 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5570 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5571 					ivar |= __SHIFTIN((qintr_idx
   5572 						| IVAR_VALID),
   5573 					    IVAR_TX_MASK_Q(qid));
   5574 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5575 					ivar |= __SHIFTIN((qintr_idx
   5576 						| IVAR_VALID),
   5577 					    IVAR_RX_MASK_Q(qid));
   5578 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5579 				}
   5580 				break;
   5581 			case WM_T_82576:
   5582 				/* TX and RX */
   5583 				for (i = 0; i < sc->sc_nqueues; i++) {
   5584 					wmq = &sc->sc_queue[i];
   5585 					qid = wmq->wmq_id;
   5586 					qintr_idx = wmq->wmq_intr_idx;
   5587 
   5588 					ivar = CSR_READ(sc,
   5589 					    WMREG_IVAR_Q_82576(qid));
   5590 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5591 					ivar |= __SHIFTIN((qintr_idx
   5592 						| IVAR_VALID),
   5593 					    IVAR_TX_MASK_Q_82576(qid));
   5594 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5595 					ivar |= __SHIFTIN((qintr_idx
   5596 						| IVAR_VALID),
   5597 					    IVAR_RX_MASK_Q_82576(qid));
   5598 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5599 					    ivar);
   5600 				}
   5601 				break;
   5602 			default:
   5603 				break;
   5604 			}
   5605 
   5606 			/* Link status */
   5607 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5608 			    IVAR_MISC_OTHER);
   5609 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5610 		}
   5611 
   5612 		if (wm_is_using_multiqueue(sc)) {
   5613 			wm_init_rss(sc);
   5614 
    5615 			/*
    5616 			 * NOTE: Receive Full-Packet Checksum Offload is
    5617 			 * mutually exclusive with Multiqueue.  However,
    5618 			 * this is not the same as the TCP/IP checksum
    5619 			 * offloads, which still work.
    5620 			 */
   5621 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5622 			reg |= RXCSUM_PCSD;
   5623 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5624 		}
   5625 	}
   5626 
   5627 	/* Set up the interrupt registers. */
   5628 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5629 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5630 	    ICR_RXO | ICR_RXT0;
   5631 	if (wm_is_using_msix(sc)) {
   5632 		uint32_t mask;
   5633 		struct wm_queue *wmq;
   5634 
   5635 		switch (sc->sc_type) {
   5636 		case WM_T_82574:
   5637 			mask = 0;
   5638 			for (i = 0; i < sc->sc_nqueues; i++) {
   5639 				wmq = &sc->sc_queue[i];
   5640 				mask |= ICR_TXQ(wmq->wmq_id);
   5641 				mask |= ICR_RXQ(wmq->wmq_id);
   5642 			}
   5643 			mask |= ICR_OTHER;
   5644 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5645 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5646 			break;
   5647 		default:
   5648 			if (sc->sc_type == WM_T_82575) {
   5649 				mask = 0;
   5650 				for (i = 0; i < sc->sc_nqueues; i++) {
   5651 					wmq = &sc->sc_queue[i];
   5652 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5653 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5654 				}
   5655 				mask |= EITR_OTHER;
   5656 			} else {
   5657 				mask = 0;
   5658 				for (i = 0; i < sc->sc_nqueues; i++) {
   5659 					wmq = &sc->sc_queue[i];
   5660 					mask |= 1 << wmq->wmq_intr_idx;
   5661 				}
   5662 				mask |= 1 << sc->sc_link_intr_idx;
   5663 			}
   5664 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5665 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5666 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5667 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5668 			break;
   5669 		}
   5670 	} else
   5671 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5672 
   5673 	/* Set up the inter-packet gap. */
   5674 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5675 
   5676 	if (sc->sc_type >= WM_T_82543) {
   5677 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5678 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5679 			wm_itrs_writereg(sc, wmq);
   5680 		}
   5681 		/*
    5682 		 * Link interrupts occur much less frequently than TX and
    5683 		 * RX interrupts, so we don't tune the
    5684 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    5685 		 * if_igb does.
   5686 		 */
   5687 	}
   5688 
   5689 	/* Set the VLAN ethernetype. */
   5690 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5691 
   5692 	/*
   5693 	 * Set up the transmit control register; we start out with
    5694 	 * a collision distance suitable for FDX, but update it when
   5695 	 * we resolve the media type.
   5696 	 */
   5697 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5698 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5699 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5700 	if (sc->sc_type >= WM_T_82571)
   5701 		sc->sc_tctl |= TCTL_MULR;
   5702 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5703 
   5704 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5705 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   5706 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5707 	}
   5708 
   5709 	if (sc->sc_type == WM_T_80003) {
   5710 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5711 		reg &= ~TCTL_EXT_GCEX_MASK;
   5712 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5713 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5714 	}
   5715 
   5716 	/* Set the media. */
   5717 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5718 		goto out;
   5719 
   5720 	/* Configure for OS presence */
   5721 	wm_init_manageability(sc);
   5722 
   5723 	/*
   5724 	 * Set up the receive control register; we actually program
   5725 	 * the register when we set the receive filter.  Use multicast
   5726 	 * address offset type 0.
   5727 	 *
   5728 	 * Only the i82544 has the ability to strip the incoming
   5729 	 * CRC, so we don't enable that feature.
   5730 	 */
   5731 	sc->sc_mchash_type = 0;
   5732 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5733 	    | RCTL_MO(sc->sc_mchash_type);
   5734 
   5735 	/*
    5736 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5737 	 */
   5738 	if (sc->sc_type == WM_T_82574)
   5739 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5740 
   5741 	/*
   5742 	 * The I350 has a bug where it always strips the CRC whether
    5743 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   5744 	 */
   5745 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5746 	    || (sc->sc_type == WM_T_I210))
   5747 		sc->sc_rctl |= RCTL_SECRC;
   5748 
   5749 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5750 	    && (ifp->if_mtu > ETHERMTU)) {
   5751 		sc->sc_rctl |= RCTL_LPE;
   5752 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5753 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5754 	}
   5755 
   5756 	if (MCLBYTES == 2048) {
   5757 		sc->sc_rctl |= RCTL_2k;
   5758 	} else {
   5759 		if (sc->sc_type >= WM_T_82543) {
   5760 			switch (MCLBYTES) {
   5761 			case 4096:
   5762 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5763 				break;
   5764 			case 8192:
   5765 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5766 				break;
   5767 			case 16384:
   5768 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5769 				break;
   5770 			default:
   5771 				panic("wm_init: MCLBYTES %d unsupported",
   5772 				    MCLBYTES);
   5773 				break;
   5774 			}
   5775 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5776 	}
   5777 
   5778 	/* Enable ECC */
   5779 	switch (sc->sc_type) {
   5780 	case WM_T_82571:
   5781 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5782 		reg |= PBA_ECC_CORR_EN;
   5783 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5784 		break;
   5785 	case WM_T_PCH_LPT:
   5786 	case WM_T_PCH_SPT:
   5787 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5788 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5789 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5790 
   5791 		sc->sc_ctrl |= CTRL_MEHE;
   5792 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5793 		break;
   5794 	default:
   5795 		break;
   5796 	}
   5797 
    5798 	/* On 82575 and later, set RDT only if RX is enabled */
   5799 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5800 		int qidx;
   5801 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5802 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5803 			for (i = 0; i < WM_NRXDESC; i++) {
   5804 				mutex_enter(rxq->rxq_lock);
   5805 				wm_init_rxdesc(rxq, i);
   5806 				mutex_exit(rxq->rxq_lock);
   5807 
   5808 			}
   5809 		}
   5810 	}
   5811 
   5812 	/* Set the receive filter. */
   5813 	wm_set_filter(sc);
   5814 
   5815 	wm_turnon(sc);
   5816 
   5817 	/* Start the one second link check clock. */
   5818 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5819 
   5820 	/* ...all done! */
   5821 	ifp->if_flags |= IFF_RUNNING;
   5822 	ifp->if_flags &= ~IFF_OACTIVE;
   5823 
   5824  out:
   5825 	sc->sc_if_flags = ifp->if_flags;
   5826 	if (error)
   5827 		log(LOG_ERR, "%s: interface not running\n",
   5828 		    device_xname(sc->sc_dev));
   5829 	return error;
   5830 }
   5831 
   5832 /*
   5833  * wm_stop:		[ifnet interface function]
   5834  *
   5835  *	Stop transmission on the interface.
   5836  */
   5837 static void
   5838 wm_stop(struct ifnet *ifp, int disable)
   5839 {
   5840 	struct wm_softc *sc = ifp->if_softc;
   5841 
   5842 	WM_CORE_LOCK(sc);
   5843 	wm_stop_locked(ifp, disable);
   5844 	WM_CORE_UNLOCK(sc);
   5845 }
   5846 
   5847 static void
   5848 wm_stop_locked(struct ifnet *ifp, int disable)
   5849 {
   5850 	struct wm_softc *sc = ifp->if_softc;
   5851 	struct wm_txsoft *txs;
   5852 	int i, qidx;
   5853 
   5854 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5855 		device_xname(sc->sc_dev), __func__));
   5856 	KASSERT(WM_CORE_LOCKED(sc));
   5857 
   5858 	wm_turnoff(sc);
   5859 
   5860 	/* Stop the one second clock. */
   5861 	callout_stop(&sc->sc_tick_ch);
   5862 
   5863 	/* Stop the 82547 Tx FIFO stall check timer. */
   5864 	if (sc->sc_type == WM_T_82547)
   5865 		callout_stop(&sc->sc_txfifo_ch);
   5866 
   5867 	if (sc->sc_flags & WM_F_HAS_MII) {
   5868 		/* Down the MII. */
   5869 		mii_down(&sc->sc_mii);
   5870 	} else {
   5871 #if 0
   5872 		/* Should we clear PHY's status properly? */
   5873 		wm_reset(sc);
   5874 #endif
   5875 	}
   5876 
   5877 	/* Stop the transmit and receive processes. */
   5878 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5879 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5880 	sc->sc_rctl &= ~RCTL_EN;
   5881 
   5882 	/*
   5883 	 * Clear the interrupt mask to ensure the device cannot assert its
   5884 	 * interrupt line.
   5885 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5886 	 * service any currently pending or shared interrupt.
   5887 	 */
   5888 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5889 	sc->sc_icr = 0;
   5890 	if (wm_is_using_msix(sc)) {
   5891 		if (sc->sc_type != WM_T_82574) {
   5892 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5893 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5894 		} else
   5895 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5896 	}
   5897 
   5898 	/* Release any queued transmit buffers. */
   5899 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5900 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5901 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5902 		mutex_enter(txq->txq_lock);
   5903 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5904 			txs = &txq->txq_soft[i];
   5905 			if (txs->txs_mbuf != NULL) {
   5906 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5907 				m_freem(txs->txs_mbuf);
   5908 				txs->txs_mbuf = NULL;
   5909 			}
   5910 		}
   5911 		mutex_exit(txq->txq_lock);
   5912 	}
   5913 
   5914 	/* Mark the interface as down and cancel the watchdog timer. */
   5915 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5916 	ifp->if_timer = 0;
   5917 
   5918 	if (disable) {
   5919 		for (i = 0; i < sc->sc_nqueues; i++) {
   5920 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5921 			mutex_enter(rxq->rxq_lock);
   5922 			wm_rxdrain(rxq);
   5923 			mutex_exit(rxq->rxq_lock);
   5924 		}
   5925 	}
   5926 
   5927 #if 0 /* notyet */
   5928 	if (sc->sc_type >= WM_T_82544)
   5929 		CSR_WRITE(sc, WMREG_WUC, 0);
   5930 #endif
   5931 }
   5932 
   5933 static void
   5934 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5935 {
   5936 	struct mbuf *m;
   5937 	int i;
   5938 
   5939 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5940 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5941 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5942 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5943 		    m->m_data, m->m_len, m->m_flags);
   5944 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5945 	    i, i == 1 ? "" : "s");
   5946 }
   5947 
   5948 /*
   5949  * wm_82547_txfifo_stall:
   5950  *
   5951  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5952  *	reset the FIFO pointers, and restart packet transmission.
   5953  */
   5954 static void
   5955 wm_82547_txfifo_stall(void *arg)
   5956 {
   5957 	struct wm_softc *sc = arg;
   5958 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5959 
   5960 	mutex_enter(txq->txq_lock);
   5961 
   5962 	if (txq->txq_stopping)
   5963 		goto out;
   5964 
   5965 	if (txq->txq_fifo_stall) {
   5966 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5967 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5968 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5969 			/*
   5970 			 * Packets have drained.  Stop transmitter, reset
   5971 			 * FIFO pointers, restart transmitter, and kick
   5972 			 * the packet queue.
   5973 			 */
   5974 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5975 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5976 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5977 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5978 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5979 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5980 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5981 			CSR_WRITE_FLUSH(sc);
   5982 
   5983 			txq->txq_fifo_head = 0;
   5984 			txq->txq_fifo_stall = 0;
   5985 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5986 		} else {
   5987 			/*
   5988 			 * Still waiting for packets to drain; try again in
   5989 			 * another tick.
   5990 			 */
   5991 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5992 		}
   5993 	}
   5994 
   5995 out:
   5996 	mutex_exit(txq->txq_lock);
   5997 }
   5998 
   5999 /*
   6000  * wm_82547_txfifo_bugchk:
   6001  *
   6002  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6003  *	prevent enqueueing a packet that would wrap around the end
    6004  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6005  *
   6006  *	We do this by checking the amount of space before the end
   6007  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6008  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6009  *	the internal FIFO pointers to the beginning, and restart
   6010  *	transmission on the interface.
   6011  */
   6012 #define	WM_FIFO_HDR		0x10
   6013 #define	WM_82547_PAD_LEN	0x3e0
   6014 static int
   6015 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6016 {
   6017 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6018 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6019 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6020 
   6021 	/* Just return if already stalled. */
   6022 	if (txq->txq_fifo_stall)
   6023 		return 1;
   6024 
   6025 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6026 		/* Stall only occurs in half-duplex mode. */
   6027 		goto send_packet;
   6028 	}
   6029 
   6030 	if (len >= WM_82547_PAD_LEN + space) {
   6031 		txq->txq_fifo_stall = 1;
   6032 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6033 		return 1;
   6034 	}
   6035 
   6036  send_packet:
   6037 	txq->txq_fifo_head += len;
   6038 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6039 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6040 
   6041 	return 0;
   6042 }
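
/*
 * Worked example of the check above (sizes hypothetical): with
 * txq_fifo_size = 8192 and txq_fifo_head = 7168, space is 1024 bytes.
 * A 1518-byte frame rounds up to len = 1536; since 1536 is less than
 * WM_82547_PAD_LEN + 1024 = 2016, the packet may be sent even though it
 * wraps, because the wrapped portion (1536 - 1024 = 512 bytes) is
 * smaller than WM_82547_PAD_LEN.  txq_fifo_head then advances to 8704
 * and wraps back to 512.
 */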
   6043 
   6044 static int
   6045 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6046 {
   6047 	int error;
   6048 
   6049 	/*
   6050 	 * Allocate the control data structures, and create and load the
   6051 	 * DMA map for it.
   6052 	 *
   6053 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6054 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6055 	 * both sets within the same 4G segment.
   6056 	 */
   6057 	if (sc->sc_type < WM_T_82544)
   6058 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6059 	else
   6060 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6061 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6062 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6063 	else
   6064 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6065 
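	/*
	 * The same-4G-segment constraint noted above is enforced via the
	 * "boundary" argument (0x100000000ULL) to bus_dmamem_alloc()
	 * below, which guarantees the allocation does not cross a 4 GB
	 * boundary.
	 */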
   6066 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6067 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6068 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6069 		aprint_error_dev(sc->sc_dev,
   6070 		    "unable to allocate TX control data, error = %d\n",
   6071 		    error);
   6072 		goto fail_0;
   6073 	}
   6074 
   6075 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6076 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6077 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6078 		aprint_error_dev(sc->sc_dev,
   6079 		    "unable to map TX control data, error = %d\n", error);
   6080 		goto fail_1;
   6081 	}
   6082 
   6083 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6084 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6085 		aprint_error_dev(sc->sc_dev,
   6086 		    "unable to create TX control data DMA map, error = %d\n",
   6087 		    error);
   6088 		goto fail_2;
   6089 	}
   6090 
   6091 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6092 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6093 		aprint_error_dev(sc->sc_dev,
   6094 		    "unable to load TX control data DMA map, error = %d\n",
   6095 		    error);
   6096 		goto fail_3;
   6097 	}
   6098 
   6099 	return 0;
   6100 
   6101  fail_3:
   6102 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6103  fail_2:
   6104 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6105 	    WM_TXDESCS_SIZE(txq));
   6106  fail_1:
   6107 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6108  fail_0:
   6109 	return error;
   6110 }
   6111 
   6112 static void
   6113 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6114 {
   6115 
   6116 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6117 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6118 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6119 	    WM_TXDESCS_SIZE(txq));
   6120 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6121 }
   6122 
   6123 static int
   6124 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6125 {
   6126 	int error;
   6127 	size_t rxq_descs_size;
   6128 
   6129 	/*
   6130 	 * Allocate the control data structures, and create and load the
   6131 	 * DMA map for it.
   6132 	 *
   6133 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6134 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6135 	 * both sets within the same 4G segment.
   6136 	 */
   6137 	rxq->rxq_ndesc = WM_NRXDESC;
   6138 	if (sc->sc_type == WM_T_82574)
   6139 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6140 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6141 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6142 	else
   6143 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6144 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6145 
   6146 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6147 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6148 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6149 		aprint_error_dev(sc->sc_dev,
   6150 		    "unable to allocate RX control data, error = %d\n",
   6151 		    error);
   6152 		goto fail_0;
   6153 	}
   6154 
   6155 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6156 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6157 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6158 		aprint_error_dev(sc->sc_dev,
   6159 		    "unable to map RX control data, error = %d\n", error);
   6160 		goto fail_1;
   6161 	}
   6162 
   6163 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6164 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6165 		aprint_error_dev(sc->sc_dev,
   6166 		    "unable to create RX control data DMA map, error = %d\n",
   6167 		    error);
   6168 		goto fail_2;
   6169 	}
   6170 
   6171 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6172 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6173 		aprint_error_dev(sc->sc_dev,
   6174 		    "unable to load RX control data DMA map, error = %d\n",
   6175 		    error);
   6176 		goto fail_3;
   6177 	}
   6178 
   6179 	return 0;
   6180 
   6181  fail_3:
   6182 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6183  fail_2:
   6184 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6185 	    rxq_descs_size);
   6186  fail_1:
   6187 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6188  fail_0:
   6189 	return error;
   6190 }
   6191 
   6192 static void
   6193 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6194 {
   6195 
   6196 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6197 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6198 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6199 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6200 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6201 }
   6202 
   6203 
   6204 static int
   6205 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6206 {
   6207 	int i, error;
   6208 
   6209 	/* Create the transmit buffer DMA maps. */
   6210 	WM_TXQUEUELEN(txq) =
   6211 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6212 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6213 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6214 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6215 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6216 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6217 			aprint_error_dev(sc->sc_dev,
   6218 			    "unable to create Tx DMA map %d, error = %d\n",
   6219 			    i, error);
   6220 			goto fail;
   6221 		}
   6222 	}
   6223 
   6224 	return 0;
   6225 
   6226  fail:
   6227 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6228 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6229 			bus_dmamap_destroy(sc->sc_dmat,
   6230 			    txq->txq_soft[i].txs_dmamap);
   6231 	}
   6232 	return error;
   6233 }
   6234 
   6235 static void
   6236 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6237 {
   6238 	int i;
   6239 
   6240 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6241 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6242 			bus_dmamap_destroy(sc->sc_dmat,
   6243 			    txq->txq_soft[i].txs_dmamap);
   6244 	}
   6245 }
   6246 
   6247 static int
   6248 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6249 {
   6250 	int i, error;
   6251 
   6252 	/* Create the receive buffer DMA maps. */
   6253 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6254 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6255 			    MCLBYTES, 0, 0,
   6256 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6257 			aprint_error_dev(sc->sc_dev,
    6258 			    "unable to create Rx DMA map %d, error = %d\n",
   6259 			    i, error);
   6260 			goto fail;
   6261 		}
   6262 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6263 	}
   6264 
   6265 	return 0;
   6266 
   6267  fail:
   6268 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6269 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6270 			bus_dmamap_destroy(sc->sc_dmat,
   6271 			    rxq->rxq_soft[i].rxs_dmamap);
   6272 	}
   6273 	return error;
   6274 }
   6275 
   6276 static void
   6277 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6278 {
   6279 	int i;
   6280 
   6281 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6282 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6283 			bus_dmamap_destroy(sc->sc_dmat,
   6284 			    rxq->rxq_soft[i].rxs_dmamap);
   6285 	}
   6286 }
   6287 
   6288 /*
    6289  * wm_alloc_txrx_queues:
   6290  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6291  */
   6292 static int
   6293 wm_alloc_txrx_queues(struct wm_softc *sc)
   6294 {
   6295 	int i, error, tx_done, rx_done;
   6296 
   6297 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6298 	    KM_SLEEP);
   6299 	if (sc->sc_queue == NULL) {
    6300 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6301 		error = ENOMEM;
   6302 		goto fail_0;
   6303 	}
   6304 
   6305 	/*
   6306 	 * For transmission
   6307 	 */
   6308 	error = 0;
   6309 	tx_done = 0;
   6310 	for (i = 0; i < sc->sc_nqueues; i++) {
   6311 #ifdef WM_EVENT_COUNTERS
   6312 		int j;
   6313 		const char *xname;
   6314 #endif
   6315 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6316 		txq->txq_sc = sc;
   6317 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6318 
   6319 		error = wm_alloc_tx_descs(sc, txq);
   6320 		if (error)
   6321 			break;
   6322 		error = wm_alloc_tx_buffer(sc, txq);
   6323 		if (error) {
   6324 			wm_free_tx_descs(sc, txq);
   6325 			break;
   6326 		}
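         		/*
         		 * txq_interq is the per-queue staging queue filled by
         		 * wm_transmit()/wm_nq_transmit(); pcq(9) permits
         		 * lock-free multi-producer puts, so enqueueing does not
         		 * require txq_lock.
         		 */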
   6327 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6328 		if (txq->txq_interq == NULL) {
   6329 			wm_free_tx_descs(sc, txq);
   6330 			wm_free_tx_buffer(sc, txq);
   6331 			error = ENOMEM;
   6332 			break;
   6333 		}
   6334 
   6335 #ifdef WM_EVENT_COUNTERS
   6336 		xname = device_xname(sc->sc_dev);
   6337 
   6338 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6339 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6340 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6341 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6342 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6343 
   6344 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6345 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6346 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6347 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6348 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6349 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6350 
   6351 		for (j = 0; j < WM_NTXSEGS; j++) {
   6352 			snprintf(txq->txq_txseg_evcnt_names[j],
   6353 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6354 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6355 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6356 		}
   6357 
   6358 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6359 
   6360 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6361 #endif /* WM_EVENT_COUNTERS */
   6362 
   6363 		tx_done++;
   6364 	}
   6365 	if (error)
   6366 		goto fail_1;
   6367 
   6368 	/*
    6369 	 * For receive
   6370 	 */
   6371 	error = 0;
   6372 	rx_done = 0;
   6373 	for (i = 0; i < sc->sc_nqueues; i++) {
   6374 #ifdef WM_EVENT_COUNTERS
   6375 		const char *xname;
   6376 #endif
   6377 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6378 		rxq->rxq_sc = sc;
   6379 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6380 
   6381 		error = wm_alloc_rx_descs(sc, rxq);
   6382 		if (error)
   6383 			break;
   6384 
   6385 		error = wm_alloc_rx_buffer(sc, rxq);
   6386 		if (error) {
   6387 			wm_free_rx_descs(sc, rxq);
   6388 			break;
   6389 		}
   6390 
   6391 #ifdef WM_EVENT_COUNTERS
   6392 		xname = device_xname(sc->sc_dev);
   6393 
   6394 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6395 
   6396 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6397 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6398 #endif /* WM_EVENT_COUNTERS */
   6399 
   6400 		rx_done++;
   6401 	}
   6402 	if (error)
   6403 		goto fail_2;
   6404 
   6405 	return 0;
   6406 
   6407  fail_2:
   6408 	for (i = 0; i < rx_done; i++) {
   6409 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6410 		wm_free_rx_buffer(sc, rxq);
   6411 		wm_free_rx_descs(sc, rxq);
   6412 		if (rxq->rxq_lock)
   6413 			mutex_obj_free(rxq->rxq_lock);
   6414 	}
   6415  fail_1:
   6416 	for (i = 0; i < tx_done; i++) {
   6417 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6418 		pcq_destroy(txq->txq_interq);
   6419 		wm_free_tx_buffer(sc, txq);
   6420 		wm_free_tx_descs(sc, txq);
   6421 		if (txq->txq_lock)
   6422 			mutex_obj_free(txq->txq_lock);
   6423 	}
   6424 
   6425 	kmem_free(sc->sc_queue,
   6426 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6427  fail_0:
   6428 	return error;
   6429 }
   6430 
   6431 /*
    6432  * wm_free_txrx_queues:
   6433  *	Free {tx,rx}descs and {tx,rx} buffers
   6434  */
   6435 static void
   6436 wm_free_txrx_queues(struct wm_softc *sc)
   6437 {
   6438 	int i;
   6439 
   6440 	for (i = 0; i < sc->sc_nqueues; i++) {
   6441 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6442 
   6443 #ifdef WM_EVENT_COUNTERS
   6444 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6445 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6446 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6447 #endif /* WM_EVENT_COUNTERS */
   6448 
   6449 		wm_free_rx_buffer(sc, rxq);
   6450 		wm_free_rx_descs(sc, rxq);
   6451 		if (rxq->rxq_lock)
   6452 			mutex_obj_free(rxq->rxq_lock);
   6453 	}
   6454 
   6455 	for (i = 0; i < sc->sc_nqueues; i++) {
   6456 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6457 		struct mbuf *m;
   6458 #ifdef WM_EVENT_COUNTERS
   6459 		int j;
   6460 
   6461 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6462 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6463 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6464 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6465 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6466 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6467 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6468 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6469 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6470 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6471 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6472 
   6473 		for (j = 0; j < WM_NTXSEGS; j++)
   6474 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6475 
   6476 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6477 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6478 #endif /* WM_EVENT_COUNTERS */
   6479 
   6480 		/* drain txq_interq */
   6481 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6482 			m_freem(m);
   6483 		pcq_destroy(txq->txq_interq);
   6484 
   6485 		wm_free_tx_buffer(sc, txq);
   6486 		wm_free_tx_descs(sc, txq);
   6487 		if (txq->txq_lock)
   6488 			mutex_obj_free(txq->txq_lock);
   6489 	}
   6490 
   6491 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6492 }
   6493 
   6494 static void
   6495 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6496 {
   6497 
   6498 	KASSERT(mutex_owned(txq->txq_lock));
   6499 
   6500 	/* Initialize the transmit descriptor ring. */
   6501 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6502 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6503 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6504 	txq->txq_free = WM_NTXDESC(txq);
   6505 	txq->txq_next = 0;
   6506 }
   6507 
   6508 static void
   6509 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6510     struct wm_txqueue *txq)
   6511 {
   6512 
   6513 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6514 		device_xname(sc->sc_dev), __func__));
   6515 	KASSERT(mutex_owned(txq->txq_lock));
   6516 
   6517 	if (sc->sc_type < WM_T_82543) {
   6518 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6519 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6520 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6521 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6522 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6523 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6524 	} else {
   6525 		int qid = wmq->wmq_id;
   6526 
   6527 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6528 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6529 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6530 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6531 
   6532 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6533 			/*
   6534 			 * Don't write TDT before TCTL.EN is set.
    6535 			 * See the documentation.
   6536 			 */
   6537 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6538 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6539 			    | TXDCTL_WTHRESH(0));
   6540 		else {
   6541 			/* XXX should update with AIM? */
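         			/*
         			 * The divide-by-4 presumably converts wmq_itr
         			 * from ITR units (256 ns) to the 1.024 us units
         			 * used by TIDV and TADV; wm_init_rx_regs() does
         			 * the same for RDTR and RADV.
         			 */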
   6542 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6543 			if (sc->sc_type >= WM_T_82540) {
    6544 				/* Should be the same value as TIDV. */
   6545 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6546 			}
   6547 
   6548 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6549 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6550 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6551 		}
   6552 	}
   6553 }
   6554 
   6555 static void
   6556 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6557 {
   6558 	int i;
   6559 
   6560 	KASSERT(mutex_owned(txq->txq_lock));
   6561 
   6562 	/* Initialize the transmit job descriptors. */
   6563 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6564 		txq->txq_soft[i].txs_mbuf = NULL;
   6565 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6566 	txq->txq_snext = 0;
   6567 	txq->txq_sdirty = 0;
   6568 }
   6569 
   6570 static void
   6571 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6572     struct wm_txqueue *txq)
   6573 {
   6574 
   6575 	KASSERT(mutex_owned(txq->txq_lock));
   6576 
   6577 	/*
   6578 	 * Set up some register offsets that are different between
   6579 	 * the i82542 and the i82543 and later chips.
   6580 	 */
   6581 	if (sc->sc_type < WM_T_82543)
   6582 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6583 	else
   6584 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6585 
   6586 	wm_init_tx_descs(sc, txq);
   6587 	wm_init_tx_regs(sc, wmq, txq);
   6588 	wm_init_tx_buffer(sc, txq);
   6589 }
   6590 
   6591 static void
   6592 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6593     struct wm_rxqueue *rxq)
   6594 {
   6595 
   6596 	KASSERT(mutex_owned(rxq->rxq_lock));
   6597 
   6598 	/*
   6599 	 * Initialize the receive descriptor and receive job
   6600 	 * descriptor rings.
   6601 	 */
   6602 	if (sc->sc_type < WM_T_82543) {
   6603 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6604 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6605 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6606 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6607 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6608 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6609 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6610 
   6611 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6612 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6613 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6614 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6615 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6616 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6617 	} else {
   6618 		int qid = wmq->wmq_id;
   6619 
   6620 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6621 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6622 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6623 
   6624 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6625 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6626 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6627 
    6628 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6629 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6630 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
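         			/*
         			 * PTHRESH, HTHRESH and WTHRESH are the
         			 * descriptor prefetch, host and write-back
         			 * thresholds, respectively, counted in
         			 * descriptors.
         			 */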
   6631 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6632 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6633 			    | RXDCTL_WTHRESH(1));
   6634 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6635 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6636 		} else {
   6637 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6638 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6639 			/* XXX should update with AIM? */
   6640 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6641 			/* MUST be the same value as RDTR. */
   6642 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6643 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6644 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6645 		}
   6646 	}
   6647 }
   6648 
   6649 static int
   6650 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6651 {
   6652 	struct wm_rxsoft *rxs;
   6653 	int error, i;
   6654 
   6655 	KASSERT(mutex_owned(rxq->rxq_lock));
   6656 
   6657 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6658 		rxs = &rxq->rxq_soft[i];
   6659 		if (rxs->rxs_mbuf == NULL) {
   6660 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6661 				log(LOG_ERR, "%s: unable to allocate or map "
   6662 				    "rx buffer %d, error = %d\n",
   6663 				    device_xname(sc->sc_dev), i, error);
   6664 				/*
   6665 				 * XXX Should attempt to run with fewer receive
   6666 				 * XXX buffers instead of just failing.
   6667 				 */
   6668 				wm_rxdrain(rxq);
   6669 				return ENOMEM;
   6670 			}
   6671 		} else {
   6672 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6673 				wm_init_rxdesc(rxq, i);
   6674 			/*
    6675 			 * For 82575 and newer devices, the RX descriptors
    6676 			 * must be initialized after RCTL.EN is set in
    6677 			 * wm_set_filter().
   6678 			 */
   6679 		}
   6680 	}
   6681 	rxq->rxq_ptr = 0;
   6682 	rxq->rxq_discard = 0;
   6683 	WM_RXCHAIN_RESET(rxq);
   6684 
   6685 	return 0;
   6686 }
   6687 
   6688 static int
   6689 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6690     struct wm_rxqueue *rxq)
   6691 {
   6692 
   6693 	KASSERT(mutex_owned(rxq->rxq_lock));
   6694 
   6695 	/*
   6696 	 * Set up some register offsets that are different between
   6697 	 * the i82542 and the i82543 and later chips.
   6698 	 */
   6699 	if (sc->sc_type < WM_T_82543)
   6700 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6701 	else
   6702 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6703 
   6704 	wm_init_rx_regs(sc, wmq, rxq);
   6705 	return wm_init_rx_buffer(sc, rxq);
   6706 }
   6707 
   6708 /*
    6709  * wm_init_txrx_queues:
   6710  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6711  */
   6712 static int
   6713 wm_init_txrx_queues(struct wm_softc *sc)
   6714 {
   6715 	int i, error = 0;
   6716 
   6717 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6718 		device_xname(sc->sc_dev), __func__));
   6719 
   6720 	for (i = 0; i < sc->sc_nqueues; i++) {
   6721 		struct wm_queue *wmq = &sc->sc_queue[i];
   6722 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6723 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6724 
   6725 		/*
   6726 		 * TODO
    6727 		 * Currently, a constant value is used instead of AIM.
    6728 		 * Furthermore, the interrupt interval of multiqueue, which
    6729 		 * uses polling mode, is less than the default value.
    6730 		 * More tuning and AIM support are required.
   6731 		 */
   6732 		if (wm_is_using_multiqueue(sc))
   6733 			wmq->wmq_itr = 50;
   6734 		else
   6735 			wmq->wmq_itr = sc->sc_itr_init;
   6736 		wmq->wmq_set_itr = true;
   6737 
   6738 		mutex_enter(txq->txq_lock);
   6739 		wm_init_tx_queue(sc, wmq, txq);
   6740 		mutex_exit(txq->txq_lock);
   6741 
   6742 		mutex_enter(rxq->rxq_lock);
   6743 		error = wm_init_rx_queue(sc, wmq, rxq);
   6744 		mutex_exit(rxq->rxq_lock);
   6745 		if (error)
   6746 			break;
   6747 	}
   6748 
   6749 	return error;
   6750 }
   6751 
   6752 /*
   6753  * wm_tx_offload:
   6754  *
   6755  *	Set up TCP/IP checksumming parameters for the
   6756  *	specified packet.
   6757  */
   6758 static int
   6759 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6760     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6761 {
   6762 	struct mbuf *m0 = txs->txs_mbuf;
   6763 	struct livengood_tcpip_ctxdesc *t;
   6764 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6765 	uint32_t ipcse;
   6766 	struct ether_header *eh;
   6767 	int offset, iphl;
   6768 	uint8_t fields;
   6769 
   6770 	/*
   6771 	 * XXX It would be nice if the mbuf pkthdr had offset
   6772 	 * fields for the protocol headers.
   6773 	 */
   6774 
   6775 	eh = mtod(m0, struct ether_header *);
   6776 	switch (htons(eh->ether_type)) {
   6777 	case ETHERTYPE_IP:
   6778 	case ETHERTYPE_IPV6:
   6779 		offset = ETHER_HDR_LEN;
   6780 		break;
   6781 
   6782 	case ETHERTYPE_VLAN:
   6783 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6784 		break;
   6785 
   6786 	default:
   6787 		/*
   6788 		 * Don't support this protocol or encapsulation.
   6789 		 */
   6790 		*fieldsp = 0;
   6791 		*cmdp = 0;
   6792 		return 0;
   6793 	}
   6794 
   6795 	if ((m0->m_pkthdr.csum_flags &
   6796 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6797 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6798 	} else {
   6799 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6800 	}
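         	/* IPCSE is an inclusive ending byte offset, hence the -1. */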
   6801 	ipcse = offset + iphl - 1;
   6802 
   6803 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6804 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6805 	seg = 0;
   6806 	fields = 0;
   6807 
   6808 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6809 		int hlen = offset + iphl;
   6810 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6811 
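         		/*
         		 * For TSO, the TCP pseudo-header checksum must be
         		 * computed with a zero length field because the hardware
         		 * fills in the per-segment lengths itself; that is why
         		 * ip_len/ip6_plen are zeroed before calling
         		 * in_cksum_phdr()/in6_cksum_phdr() below.
         		 */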
   6812 		if (__predict_false(m0->m_len <
   6813 				    (hlen + sizeof(struct tcphdr)))) {
   6814 			/*
   6815 			 * TCP/IP headers are not in the first mbuf; we need
   6816 			 * to do this the slow and painful way.  Let's just
   6817 			 * hope this doesn't happen very often.
   6818 			 */
   6819 			struct tcphdr th;
   6820 
   6821 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6822 
   6823 			m_copydata(m0, hlen, sizeof(th), &th);
   6824 			if (v4) {
   6825 				struct ip ip;
   6826 
   6827 				m_copydata(m0, offset, sizeof(ip), &ip);
   6828 				ip.ip_len = 0;
   6829 				m_copyback(m0,
   6830 				    offset + offsetof(struct ip, ip_len),
   6831 				    sizeof(ip.ip_len), &ip.ip_len);
   6832 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6833 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6834 			} else {
   6835 				struct ip6_hdr ip6;
   6836 
   6837 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6838 				ip6.ip6_plen = 0;
   6839 				m_copyback(m0,
   6840 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6841 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6842 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6843 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6844 			}
   6845 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6846 			    sizeof(th.th_sum), &th.th_sum);
   6847 
   6848 			hlen += th.th_off << 2;
   6849 		} else {
   6850 			/*
   6851 			 * TCP/IP headers are in the first mbuf; we can do
   6852 			 * this the easy way.
   6853 			 */
   6854 			struct tcphdr *th;
   6855 
   6856 			if (v4) {
   6857 				struct ip *ip =
   6858 				    (void *)(mtod(m0, char *) + offset);
   6859 				th = (void *)(mtod(m0, char *) + hlen);
   6860 
   6861 				ip->ip_len = 0;
   6862 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6863 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6864 			} else {
   6865 				struct ip6_hdr *ip6 =
   6866 				    (void *)(mtod(m0, char *) + offset);
   6867 				th = (void *)(mtod(m0, char *) + hlen);
   6868 
   6869 				ip6->ip6_plen = 0;
   6870 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6871 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6872 			}
   6873 			hlen += th->th_off << 2;
   6874 		}
   6875 
   6876 		if (v4) {
   6877 			WM_Q_EVCNT_INCR(txq, txtso);
   6878 			cmdlen |= WTX_TCPIP_CMD_IP;
   6879 		} else {
   6880 			WM_Q_EVCNT_INCR(txq, txtso6);
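         			/* IPv6 has no header checksum, so no IPCSE range. */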
   6881 			ipcse = 0;
   6882 		}
   6883 		cmd |= WTX_TCPIP_CMD_TSE;
   6884 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6885 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6886 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6887 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6888 	}
   6889 
   6890 	/*
   6891 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6892 	 * offload feature, if we load the context descriptor, we
   6893 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6894 	 */
   6895 
   6896 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6897 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6898 	    WTX_TCPIP_IPCSE(ipcse);
   6899 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6900 		WM_Q_EVCNT_INCR(txq, txipsum);
   6901 		fields |= WTX_IXSM;
   6902 	}
   6903 
   6904 	offset += iphl;
   6905 
   6906 	if (m0->m_pkthdr.csum_flags &
   6907 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6908 		WM_Q_EVCNT_INCR(txq, txtusum);
   6909 		fields |= WTX_TXSM;
   6910 		tucs = WTX_TCPIP_TUCSS(offset) |
   6911 		    WTX_TCPIP_TUCSO(offset +
   6912 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6913 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6914 	} else if ((m0->m_pkthdr.csum_flags &
   6915 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6916 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6917 		fields |= WTX_TXSM;
   6918 		tucs = WTX_TCPIP_TUCSS(offset) |
   6919 		    WTX_TCPIP_TUCSO(offset +
   6920 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6921 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6922 	} else {
   6923 		/* Just initialize it to a valid TCP context. */
   6924 		tucs = WTX_TCPIP_TUCSS(offset) |
   6925 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6926 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6927 	}
   6928 
   6929 	/*
    6930 	 * We don't have to write a context descriptor for every packet,
    6931 	 * except on the 82574: there, a context descriptor must be written
    6932 	 * for every packet when two descriptor queues are used.
    6933 	 * Writing a context descriptor for every packet adds overhead,
    6934 	 * but it does not cause problems.
   6935 	 */
   6936 	/* Fill in the context descriptor. */
   6937 	t = (struct livengood_tcpip_ctxdesc *)
   6938 	    &txq->txq_descs[txq->txq_next];
   6939 	t->tcpip_ipcs = htole32(ipcs);
   6940 	t->tcpip_tucs = htole32(tucs);
   6941 	t->tcpip_cmdlen = htole32(cmdlen);
   6942 	t->tcpip_seg = htole32(seg);
   6943 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6944 
   6945 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6946 	txs->txs_ndesc++;
   6947 
   6948 	*cmdp = cmd;
   6949 	*fieldsp = fields;
   6950 
   6951 	return 0;
   6952 }
   6953 
   6954 static inline int
   6955 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6956 {
   6957 	struct wm_softc *sc = ifp->if_softc;
   6958 	u_int cpuid = cpu_index(curcpu());
   6959 
    6960 	/*
    6961 	 * Currently, a simple distribution strategy; adding ncpu keeps the
    6962 	 * unsigned arithmetic from wrapping when cpuid < sc_affinity_offset.
    6963 	 * TODO: distribute by flowid (RSS hash value).
    6964 	 */
    6965 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6966 }
   6967 
   6968 /*
   6969  * wm_start:		[ifnet interface function]
   6970  *
   6971  *	Start packet transmission on the interface.
   6972  */
   6973 static void
   6974 wm_start(struct ifnet *ifp)
   6975 {
   6976 	struct wm_softc *sc = ifp->if_softc;
   6977 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6978 
   6979 #ifdef WM_MPSAFE
   6980 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6981 #endif
   6982 	/*
   6983 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6984 	 */
   6985 
   6986 	mutex_enter(txq->txq_lock);
   6987 	if (!txq->txq_stopping)
   6988 		wm_start_locked(ifp);
   6989 	mutex_exit(txq->txq_lock);
   6990 }
   6991 
   6992 static void
   6993 wm_start_locked(struct ifnet *ifp)
   6994 {
   6995 	struct wm_softc *sc = ifp->if_softc;
   6996 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6997 
   6998 	wm_send_common_locked(ifp, txq, false);
   6999 }
   7000 
   7001 static int
   7002 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7003 {
   7004 	int qid;
   7005 	struct wm_softc *sc = ifp->if_softc;
   7006 	struct wm_txqueue *txq;
   7007 
   7008 	qid = wm_select_txqueue(ifp, m);
   7009 	txq = &sc->sc_queue[qid].wmq_txq;
   7010 
   7011 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7012 		m_freem(m);
   7013 		WM_Q_EVCNT_INCR(txq, txdrop);
   7014 		return ENOBUFS;
   7015 	}
   7016 
   7017 	/*
   7018 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7019 	 */
   7020 	ifp->if_obytes += m->m_pkthdr.len;
   7021 	if (m->m_flags & M_MCAST)
   7022 		ifp->if_omcasts++;
   7023 
   7024 	if (mutex_tryenter(txq->txq_lock)) {
   7025 		if (!txq->txq_stopping)
   7026 			wm_transmit_locked(ifp, txq);
   7027 		mutex_exit(txq->txq_lock);
   7028 	}
   7029 
   7030 	return 0;
   7031 }
   7032 
   7033 static void
   7034 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7035 {
   7036 
   7037 	wm_send_common_locked(ifp, txq, true);
   7038 }
   7039 
   7040 static void
   7041 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7042     bool is_transmit)
   7043 {
   7044 	struct wm_softc *sc = ifp->if_softc;
   7045 	struct mbuf *m0;
   7046 	struct m_tag *mtag;
   7047 	struct wm_txsoft *txs;
   7048 	bus_dmamap_t dmamap;
   7049 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7050 	bus_addr_t curaddr;
   7051 	bus_size_t seglen, curlen;
   7052 	uint32_t cksumcmd;
   7053 	uint8_t cksumfields;
   7054 
   7055 	KASSERT(mutex_owned(txq->txq_lock));
   7056 
   7057 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7058 		return;
   7059 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7060 		return;
   7061 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7062 		return;
   7063 
   7064 	/* Remember the previous number of free descriptors. */
   7065 	ofree = txq->txq_free;
   7066 
   7067 	/*
   7068 	 * Loop through the send queue, setting up transmit descriptors
   7069 	 * until we drain the queue, or use up all available transmit
   7070 	 * descriptors.
   7071 	 */
   7072 	for (;;) {
   7073 		m0 = NULL;
   7074 
   7075 		/* Get a work queue entry. */
   7076 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7077 			wm_txeof(sc, txq);
   7078 			if (txq->txq_sfree == 0) {
   7079 				DPRINTF(WM_DEBUG_TX,
   7080 				    ("%s: TX: no free job descriptors\n",
   7081 					device_xname(sc->sc_dev)));
   7082 				WM_Q_EVCNT_INCR(txq, txsstall);
   7083 				break;
   7084 			}
   7085 		}
   7086 
   7087 		/* Grab a packet off the queue. */
   7088 		if (is_transmit)
   7089 			m0 = pcq_get(txq->txq_interq);
   7090 		else
   7091 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7092 		if (m0 == NULL)
   7093 			break;
   7094 
   7095 		DPRINTF(WM_DEBUG_TX,
   7096 		    ("%s: TX: have packet to transmit: %p\n",
   7097 		    device_xname(sc->sc_dev), m0));
   7098 
   7099 		txs = &txq->txq_soft[txq->txq_snext];
   7100 		dmamap = txs->txs_dmamap;
   7101 
   7102 		use_tso = (m0->m_pkthdr.csum_flags &
   7103 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7104 
   7105 		/*
   7106 		 * So says the Linux driver:
   7107 		 * The controller does a simple calculation to make sure
   7108 		 * there is enough room in the FIFO before initiating the
   7109 		 * DMA for each buffer.  The calc is:
   7110 		 *	4 = ceil(buffer len / MSS)
   7111 		 * To make sure we don't overrun the FIFO, adjust the max
   7112 		 * buffer len if the MSS drops.
   7113 		 */
   7114 		dmamap->dm_maxsegsz =
   7115 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7116 		    ? m0->m_pkthdr.segsz << 2
   7117 		    : WTX_MAX_LEN;
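         		/*
         		 * For example, with a typical TSO MSS of 1460 bytes the
         		 * per-segment DMA limit becomes 1460 << 2 = 5840 bytes.
         		 */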
   7118 
   7119 		/*
   7120 		 * Load the DMA map.  If this fails, the packet either
   7121 		 * didn't fit in the allotted number of segments, or we
   7122 		 * were short on resources.  For the too-many-segments
   7123 		 * case, we simply report an error and drop the packet,
   7124 		 * since we can't sanely copy a jumbo packet to a single
   7125 		 * buffer.
   7126 		 */
   7127 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7128 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7129 		if (error) {
   7130 			if (error == EFBIG) {
   7131 				WM_Q_EVCNT_INCR(txq, txdrop);
   7132 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7133 				    "DMA segments, dropping...\n",
   7134 				    device_xname(sc->sc_dev));
   7135 				wm_dump_mbuf_chain(sc, m0);
   7136 				m_freem(m0);
   7137 				continue;
   7138 			}
   7139 			/*  Short on resources, just stop for now. */
   7140 			DPRINTF(WM_DEBUG_TX,
   7141 			    ("%s: TX: dmamap load failed: %d\n",
   7142 			    device_xname(sc->sc_dev), error));
   7143 			break;
   7144 		}
   7145 
   7146 		segs_needed = dmamap->dm_nsegs;
   7147 		if (use_tso) {
   7148 			/* For sentinel descriptor; see below. */
   7149 			segs_needed++;
   7150 		}
   7151 
   7152 		/*
   7153 		 * Ensure we have enough descriptors free to describe
   7154 		 * the packet.  Note, we always reserve one descriptor
   7155 		 * at the end of the ring due to the semantics of the
   7156 		 * TDT register, plus one more in the event we need
   7157 		 * to load offload context.
   7158 		 */
   7159 		if (segs_needed > txq->txq_free - 2) {
   7160 			/*
   7161 			 * Not enough free descriptors to transmit this
   7162 			 * packet.  We haven't committed anything yet,
   7163 			 * so just unload the DMA map, put the packet
    7164 			 * back on the queue, and punt.  Notify the upper
   7165 			 * layer that there are no more slots left.
   7166 			 */
   7167 			DPRINTF(WM_DEBUG_TX,
   7168 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7169 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7170 			    segs_needed, txq->txq_free - 1));
   7171 			if (!is_transmit)
   7172 				ifp->if_flags |= IFF_OACTIVE;
   7173 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7174 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7175 			WM_Q_EVCNT_INCR(txq, txdstall);
   7176 			break;
   7177 		}
   7178 
   7179 		/*
   7180 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7181 		 * once we know we can transmit the packet, since we
   7182 		 * do some internal FIFO space accounting here.
   7183 		 */
   7184 		if (sc->sc_type == WM_T_82547 &&
   7185 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7186 			DPRINTF(WM_DEBUG_TX,
   7187 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7188 			    device_xname(sc->sc_dev)));
   7189 			if (!is_transmit)
   7190 				ifp->if_flags |= IFF_OACTIVE;
   7191 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7192 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7193 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7194 			break;
   7195 		}
   7196 
   7197 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7198 
   7199 		DPRINTF(WM_DEBUG_TX,
   7200 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7201 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7202 
   7203 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7204 
   7205 		/*
   7206 		 * Store a pointer to the packet so that we can free it
   7207 		 * later.
   7208 		 *
   7209 		 * Initially, we consider the number of descriptors the
   7210 		 * packet uses the number of DMA segments.  This may be
   7211 		 * incremented by 1 if we do checksum offload (a descriptor
   7212 		 * is used to set the checksum context).
   7213 		 */
   7214 		txs->txs_mbuf = m0;
   7215 		txs->txs_firstdesc = txq->txq_next;
   7216 		txs->txs_ndesc = segs_needed;
   7217 
   7218 		/* Set up offload parameters for this packet. */
   7219 		if (m0->m_pkthdr.csum_flags &
   7220 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7221 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7222 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7223 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7224 					  &cksumfields) != 0) {
   7225 				/* Error message already displayed. */
   7226 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7227 				continue;
   7228 			}
   7229 		} else {
   7230 			cksumcmd = 0;
   7231 			cksumfields = 0;
   7232 		}
   7233 
   7234 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7235 
   7236 		/* Sync the DMA map. */
   7237 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7238 		    BUS_DMASYNC_PREWRITE);
   7239 
   7240 		/* Initialize the transmit descriptor. */
   7241 		for (nexttx = txq->txq_next, seg = 0;
   7242 		     seg < dmamap->dm_nsegs; seg++) {
   7243 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7244 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7245 			     seglen != 0;
   7246 			     curaddr += curlen, seglen -= curlen,
   7247 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7248 				curlen = seglen;
   7249 
   7250 				/*
   7251 				 * So says the Linux driver:
   7252 				 * Work around for premature descriptor
   7253 				 * write-backs in TSO mode.  Append a
   7254 				 * 4-byte sentinel descriptor.
   7255 				 */
   7256 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7257 				    curlen > 8)
   7258 					curlen -= 4;
   7259 
   7260 				wm_set_dma_addr(
   7261 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7262 				txq->txq_descs[nexttx].wtx_cmdlen
   7263 				    = htole32(cksumcmd | curlen);
   7264 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7265 				    = 0;
   7266 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7267 				    = cksumfields;
    7268 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7269 				lasttx = nexttx;
   7270 
   7271 				DPRINTF(WM_DEBUG_TX,
   7272 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7273 				     "len %#04zx\n",
   7274 				    device_xname(sc->sc_dev), nexttx,
   7275 				    (uint64_t)curaddr, curlen));
   7276 			}
   7277 		}
   7278 
   7279 		KASSERT(lasttx != -1);
   7280 
   7281 		/*
   7282 		 * Set up the command byte on the last descriptor of
   7283 		 * the packet.  If we're in the interrupt delay window,
   7284 		 * delay the interrupt.
   7285 		 */
   7286 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7287 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7288 
   7289 		/*
   7290 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7291 		 * up the descriptor to encapsulate the packet for us.
   7292 		 *
   7293 		 * This is only valid on the last descriptor of the packet.
   7294 		 */
   7295 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7296 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7297 			    htole32(WTX_CMD_VLE);
   7298 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7299 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7300 		}
   7301 
   7302 		txs->txs_lastdesc = lasttx;
   7303 
   7304 		DPRINTF(WM_DEBUG_TX,
   7305 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7306 		    device_xname(sc->sc_dev),
   7307 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7308 
   7309 		/* Sync the descriptors we're using. */
   7310 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7311 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7312 
   7313 		/* Give the packet to the chip. */
   7314 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7315 
   7316 		DPRINTF(WM_DEBUG_TX,
   7317 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7318 
   7319 		DPRINTF(WM_DEBUG_TX,
   7320 		    ("%s: TX: finished transmitting packet, job %d\n",
   7321 		    device_xname(sc->sc_dev), txq->txq_snext));
   7322 
   7323 		/* Advance the tx pointer. */
   7324 		txq->txq_free -= txs->txs_ndesc;
   7325 		txq->txq_next = nexttx;
   7326 
   7327 		txq->txq_sfree--;
   7328 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7329 
   7330 		/* Pass the packet to any BPF listeners. */
   7331 		bpf_mtap(ifp, m0);
   7332 	}
   7333 
   7334 	if (m0 != NULL) {
   7335 		if (!is_transmit)
   7336 			ifp->if_flags |= IFF_OACTIVE;
   7337 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7338 		WM_Q_EVCNT_INCR(txq, txdrop);
   7339 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7340 			__func__));
   7341 		m_freem(m0);
   7342 	}
   7343 
   7344 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7345 		/* No more slots; notify upper layer. */
   7346 		if (!is_transmit)
   7347 			ifp->if_flags |= IFF_OACTIVE;
   7348 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7349 	}
   7350 
   7351 	if (txq->txq_free != ofree) {
   7352 		/* Set a watchdog timer in case the chip flakes out. */
   7353 		ifp->if_timer = 5;
   7354 	}
   7355 }
   7356 
   7357 /*
   7358  * wm_nq_tx_offload:
   7359  *
   7360  *	Set up TCP/IP checksumming parameters for the
   7361  *	specified packet, for NEWQUEUE devices
   7362  */
   7363 static int
   7364 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7365     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7366 {
   7367 	struct mbuf *m0 = txs->txs_mbuf;
   7368 	struct m_tag *mtag;
   7369 	uint32_t vl_len, mssidx, cmdc;
   7370 	struct ether_header *eh;
   7371 	int offset, iphl;
   7372 
   7373 	/*
   7374 	 * XXX It would be nice if the mbuf pkthdr had offset
   7375 	 * fields for the protocol headers.
   7376 	 */
   7377 	*cmdlenp = 0;
   7378 	*fieldsp = 0;
   7379 
   7380 	eh = mtod(m0, struct ether_header *);
   7381 	switch (htons(eh->ether_type)) {
   7382 	case ETHERTYPE_IP:
   7383 	case ETHERTYPE_IPV6:
   7384 		offset = ETHER_HDR_LEN;
   7385 		break;
   7386 
   7387 	case ETHERTYPE_VLAN:
   7388 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7389 		break;
   7390 
   7391 	default:
   7392 		/* Don't support this protocol or encapsulation. */
   7393 		*do_csum = false;
   7394 		return 0;
   7395 	}
   7396 	*do_csum = true;
   7397 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7398 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7399 
   7400 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7401 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
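         	/*
         	 * vl_len packs the VLAN tag, MAC header length and IP header
         	 * length into one 32-bit word; the NQTXC_VLLEN_* shifts place
         	 * each field.
         	 */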
   7402 
   7403 	if ((m0->m_pkthdr.csum_flags &
   7404 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7405 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7406 	} else {
   7407 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7408 	}
   7409 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7410 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7411 
   7412 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7413 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7414 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7415 		*cmdlenp |= NQTX_CMD_VLE;
   7416 	}
   7417 
   7418 	mssidx = 0;
   7419 
   7420 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7421 		int hlen = offset + iphl;
   7422 		int tcp_hlen;
   7423 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7424 
   7425 		if (__predict_false(m0->m_len <
   7426 				    (hlen + sizeof(struct tcphdr)))) {
   7427 			/*
   7428 			 * TCP/IP headers are not in the first mbuf; we need
   7429 			 * to do this the slow and painful way.  Let's just
   7430 			 * hope this doesn't happen very often.
   7431 			 */
   7432 			struct tcphdr th;
   7433 
   7434 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7435 
   7436 			m_copydata(m0, hlen, sizeof(th), &th);
   7437 			if (v4) {
   7438 				struct ip ip;
   7439 
   7440 				m_copydata(m0, offset, sizeof(ip), &ip);
   7441 				ip.ip_len = 0;
   7442 				m_copyback(m0,
   7443 				    offset + offsetof(struct ip, ip_len),
   7444 				    sizeof(ip.ip_len), &ip.ip_len);
   7445 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7446 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7447 			} else {
   7448 				struct ip6_hdr ip6;
   7449 
   7450 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7451 				ip6.ip6_plen = 0;
   7452 				m_copyback(m0,
   7453 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7454 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7455 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7456 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7457 			}
   7458 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7459 			    sizeof(th.th_sum), &th.th_sum);
   7460 
   7461 			tcp_hlen = th.th_off << 2;
   7462 		} else {
   7463 			/*
   7464 			 * TCP/IP headers are in the first mbuf; we can do
   7465 			 * this the easy way.
   7466 			 */
   7467 			struct tcphdr *th;
   7468 
   7469 			if (v4) {
   7470 				struct ip *ip =
   7471 				    (void *)(mtod(m0, char *) + offset);
   7472 				th = (void *)(mtod(m0, char *) + hlen);
   7473 
   7474 				ip->ip_len = 0;
   7475 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7476 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7477 			} else {
   7478 				struct ip6_hdr *ip6 =
   7479 				    (void *)(mtod(m0, char *) + offset);
   7480 				th = (void *)(mtod(m0, char *) + hlen);
   7481 
   7482 				ip6->ip6_plen = 0;
   7483 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7484 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7485 			}
   7486 			tcp_hlen = th->th_off << 2;
   7487 		}
   7488 		hlen += tcp_hlen;
   7489 		*cmdlenp |= NQTX_CMD_TSE;
   7490 
   7491 		if (v4) {
   7492 			WM_Q_EVCNT_INCR(txq, txtso);
   7493 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7494 		} else {
   7495 			WM_Q_EVCNT_INCR(txq, txtso6);
   7496 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7497 		}
   7498 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7499 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7500 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7501 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7502 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7503 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7504 	} else {
   7505 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7506 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7507 	}
   7508 
   7509 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7510 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7511 		cmdc |= NQTXC_CMD_IP4;
   7512 	}
   7513 
   7514 	if (m0->m_pkthdr.csum_flags &
   7515 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7516 		WM_Q_EVCNT_INCR(txq, txtusum);
   7517 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7518 			cmdc |= NQTXC_CMD_TCP;
   7519 		} else {
   7520 			cmdc |= NQTXC_CMD_UDP;
   7521 		}
   7522 		cmdc |= NQTXC_CMD_IP4;
   7523 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7524 	}
   7525 	if (m0->m_pkthdr.csum_flags &
   7526 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7527 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7528 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7529 			cmdc |= NQTXC_CMD_TCP;
   7530 		} else {
   7531 			cmdc |= NQTXC_CMD_UDP;
   7532 		}
   7533 		cmdc |= NQTXC_CMD_IP6;
   7534 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7535 	}
   7536 
   7537 	/*
    7538 	 * We don't have to write a context descriptor for every packet on
    7539 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7540 	 * I210 and I211. It is enough to write one per Tx queue for these
    7541 	 * controllers.
    7542 	 * Writing a context descriptor for every packet adds overhead,
    7543 	 * but it does not cause problems.
   7544 	 */
   7545 	/* Fill in the context descriptor. */
   7546 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7547 	    htole32(vl_len);
   7548 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7549 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7550 	    htole32(cmdc);
   7551 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7552 	    htole32(mssidx);
   7553 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7554 	DPRINTF(WM_DEBUG_TX,
   7555 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7556 	    txq->txq_next, 0, vl_len));
   7557 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7558 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7559 	txs->txs_ndesc++;
   7560 	return 0;
   7561 }
   7562 
   7563 /*
   7564  * wm_nq_start:		[ifnet interface function]
   7565  *
   7566  *	Start packet transmission on the interface for NEWQUEUE devices
   7567  */
   7568 static void
   7569 wm_nq_start(struct ifnet *ifp)
   7570 {
   7571 	struct wm_softc *sc = ifp->if_softc;
   7572 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7573 
   7574 #ifdef WM_MPSAFE
   7575 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7576 #endif
   7577 	/*
   7578 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7579 	 */
   7580 
   7581 	mutex_enter(txq->txq_lock);
   7582 	if (!txq->txq_stopping)
   7583 		wm_nq_start_locked(ifp);
   7584 	mutex_exit(txq->txq_lock);
   7585 }
   7586 
   7587 static void
   7588 wm_nq_start_locked(struct ifnet *ifp)
   7589 {
   7590 	struct wm_softc *sc = ifp->if_softc;
   7591 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7592 
   7593 	wm_nq_send_common_locked(ifp, txq, false);
   7594 }
   7595 
   7596 static int
   7597 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7598 {
   7599 	int qid;
   7600 	struct wm_softc *sc = ifp->if_softc;
   7601 	struct wm_txqueue *txq;
   7602 
   7603 	qid = wm_select_txqueue(ifp, m);
   7604 	txq = &sc->sc_queue[qid].wmq_txq;
   7605 
   7606 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7607 		m_freem(m);
   7608 		WM_Q_EVCNT_INCR(txq, txdrop);
   7609 		return ENOBUFS;
   7610 	}
   7611 
   7612 	/*
   7613 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7614 	 */
   7615 	ifp->if_obytes += m->m_pkthdr.len;
   7616 	if (m->m_flags & M_MCAST)
   7617 		ifp->if_omcasts++;
   7618 
   7619 	/*
    7620 	 * There are two situations in which this mutex_tryenter() can fail
    7621 	 * at run time:
    7622 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7623 	 *     (2) contention with the deferred if_start softint
    7624 	 *         (wm_handle_queue())
    7625 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7626 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7627 	 * The same holds in case (2), so it does not get stuck there either.
   7628 	 */
   7629 	if (mutex_tryenter(txq->txq_lock)) {
   7630 		if (!txq->txq_stopping)
   7631 			wm_nq_transmit_locked(ifp, txq);
   7632 		mutex_exit(txq->txq_lock);
   7633 	}
   7634 
   7635 	return 0;
   7636 }
   7637 
   7638 static void
   7639 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7640 {
   7641 
   7642 	wm_nq_send_common_locked(ifp, txq, true);
   7643 }
   7644 
   7645 static void
   7646 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7647     bool is_transmit)
   7648 {
   7649 	struct wm_softc *sc = ifp->if_softc;
   7650 	struct mbuf *m0;
   7651 	struct m_tag *mtag;
   7652 	struct wm_txsoft *txs;
   7653 	bus_dmamap_t dmamap;
   7654 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7655 	bool do_csum, sent;
   7656 
   7657 	KASSERT(mutex_owned(txq->txq_lock));
   7658 
   7659 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7660 		return;
   7661 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7662 		return;
   7663 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7664 		return;
   7665 
   7666 	sent = false;
   7667 
   7668 	/*
   7669 	 * Loop through the send queue, setting up transmit descriptors
   7670 	 * until we drain the queue, or use up all available transmit
   7671 	 * descriptors.
   7672 	 */
   7673 	for (;;) {
   7674 		m0 = NULL;
   7675 
   7676 		/* Get a work queue entry. */
   7677 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7678 			wm_txeof(sc, txq);
   7679 			if (txq->txq_sfree == 0) {
   7680 				DPRINTF(WM_DEBUG_TX,
   7681 				    ("%s: TX: no free job descriptors\n",
   7682 					device_xname(sc->sc_dev)));
   7683 				WM_Q_EVCNT_INCR(txq, txsstall);
   7684 				break;
   7685 			}
   7686 		}
   7687 
   7688 		/* Grab a packet off the queue. */
   7689 		if (is_transmit)
   7690 			m0 = pcq_get(txq->txq_interq);
   7691 		else
   7692 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7693 		if (m0 == NULL)
   7694 			break;
   7695 
   7696 		DPRINTF(WM_DEBUG_TX,
   7697 		    ("%s: TX: have packet to transmit: %p\n",
   7698 		    device_xname(sc->sc_dev), m0));
   7699 
   7700 		txs = &txq->txq_soft[txq->txq_snext];
   7701 		dmamap = txs->txs_dmamap;
   7702 
   7703 		/*
   7704 		 * Load the DMA map.  If this fails, the packet either
   7705 		 * didn't fit in the allotted number of segments, or we
   7706 		 * were short on resources.  For the too-many-segments
   7707 		 * case, we simply report an error and drop the packet,
   7708 		 * since we can't sanely copy a jumbo packet to a single
   7709 		 * buffer.
   7710 		 */
   7711 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7712 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7713 		if (error) {
   7714 			if (error == EFBIG) {
   7715 				WM_Q_EVCNT_INCR(txq, txdrop);
   7716 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7717 				    "DMA segments, dropping...\n",
   7718 				    device_xname(sc->sc_dev));
   7719 				wm_dump_mbuf_chain(sc, m0);
   7720 				m_freem(m0);
   7721 				continue;
   7722 			}
   7723 			/* Short on resources, just stop for now. */
   7724 			DPRINTF(WM_DEBUG_TX,
   7725 			    ("%s: TX: dmamap load failed: %d\n",
   7726 			    device_xname(sc->sc_dev), error));
   7727 			break;
   7728 		}
   7729 
   7730 		segs_needed = dmamap->dm_nsegs;
   7731 
   7732 		/*
   7733 		 * Ensure we have enough descriptors free to describe
   7734 		 * the packet.  Note, we always reserve one descriptor
   7735 		 * at the end of the ring due to the semantics of the
   7736 		 * TDT register, plus one more in the event we need
   7737 		 * to load offload context.
   7738 		 */
   7739 		if (segs_needed > txq->txq_free - 2) {
   7740 			/*
   7741 			 * Not enough free descriptors to transmit this
   7742 			 * packet.  We haven't committed anything yet,
   7743 			 * so just unload the DMA map, put the packet
    7744 			 * back on the queue, and punt.  Notify the upper
   7745 			 * layer that there are no more slots left.
   7746 			 */
   7747 			DPRINTF(WM_DEBUG_TX,
   7748 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7749 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7750 			    segs_needed, txq->txq_free - 1));
   7751 			if (!is_transmit)
   7752 				ifp->if_flags |= IFF_OACTIVE;
   7753 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7754 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7755 			WM_Q_EVCNT_INCR(txq, txdstall);
   7756 			break;
   7757 		}
   7758 
   7759 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7760 
   7761 		DPRINTF(WM_DEBUG_TX,
   7762 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7763 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7764 
   7765 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7766 
   7767 		/*
   7768 		 * Store a pointer to the packet so that we can free it
   7769 		 * later.
   7770 		 *
   7771 		 * Initially, we consider the number of descriptors the
   7772 		 * packet uses the number of DMA segments.  This may be
   7773 		 * incremented by 1 if we do checksum offload (a descriptor
   7774 		 * is used to set the checksum context).
   7775 		 */
   7776 		txs->txs_mbuf = m0;
   7777 		txs->txs_firstdesc = txq->txq_next;
   7778 		txs->txs_ndesc = segs_needed;
   7779 
   7780 		/* Set up offload parameters for this packet. */
   7781 		uint32_t cmdlen, fields, dcmdlen;
   7782 		if (m0->m_pkthdr.csum_flags &
   7783 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7784 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7785 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7786 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7787 			    &do_csum) != 0) {
   7788 				/* Error message already displayed. */
   7789 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7790 				continue;
   7791 			}
   7792 		} else {
   7793 			do_csum = false;
   7794 			cmdlen = 0;
   7795 			fields = 0;
   7796 		}
   7797 
   7798 		/* Sync the DMA map. */
   7799 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7800 		    BUS_DMASYNC_PREWRITE);
   7801 
   7802 		/* Initialize the first transmit descriptor. */
   7803 		nexttx = txq->txq_next;
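         		/*
         		 * When no offload is requested, a legacy descriptor
         		 * works on NEWQUEUE controllers too and needs no context
         		 * descriptor; otherwise use the advanced data descriptor
         		 * format, which refers to the context written in
         		 * wm_nq_tx_offload().
         		 */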
   7804 		if (!do_csum) {
   7805 			/* setup a legacy descriptor */
   7806 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7807 			    dmamap->dm_segs[0].ds_addr);
   7808 			txq->txq_descs[nexttx].wtx_cmdlen =
   7809 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7810 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7811 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7812 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7813 			    NULL) {
   7814 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7815 				    htole32(WTX_CMD_VLE);
   7816 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7817 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7818 			} else {
    7819 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7820 			}
   7821 			dcmdlen = 0;
   7822 		} else {
   7823 			/* setup an advanced data descriptor */
   7824 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7825 			    htole64(dmamap->dm_segs[0].ds_addr);
   7826 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7827 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7828 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7829 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7830 			    htole32(fields);
   7831 			DPRINTF(WM_DEBUG_TX,
   7832 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7833 			    device_xname(sc->sc_dev), nexttx,
   7834 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7835 			DPRINTF(WM_DEBUG_TX,
   7836 			    ("\t 0x%08x%08x\n", fields,
   7837 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7838 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7839 		}
   7840 
   7841 		lasttx = nexttx;
   7842 		nexttx = WM_NEXTTX(txq, nexttx);
   7843 		/*
    7844 		 * Fill in the next descriptors. The legacy and advanced
    7845 		 * formats are the same here.
   7846 		 */
   7847 		for (seg = 1; seg < dmamap->dm_nsegs;
   7848 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7849 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7850 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7851 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7852 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7853 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7854 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7855 			lasttx = nexttx;
   7856 
   7857 			DPRINTF(WM_DEBUG_TX,
   7858 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7859 			     "len %#04zx\n",
   7860 			    device_xname(sc->sc_dev), nexttx,
   7861 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7862 			    dmamap->dm_segs[seg].ds_len));
   7863 		}
   7864 
   7865 		KASSERT(lasttx != -1);
   7866 
   7867 		/*
   7868 		 * Set up the command byte on the last descriptor of
   7869 		 * the packet.  If we're in the interrupt delay window,
   7870 		 * delay the interrupt.
   7871 		 */
   7872 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7873 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7874 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7875 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7876 
   7877 		txs->txs_lastdesc = lasttx;
   7878 
   7879 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7880 		    device_xname(sc->sc_dev),
   7881 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7882 
   7883 		/* Sync the descriptors we're using. */
   7884 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7885 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7886 
   7887 		/* Give the packet to the chip. */
   7888 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7889 		sent = true;
   7890 
   7891 		DPRINTF(WM_DEBUG_TX,
   7892 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7893 
   7894 		DPRINTF(WM_DEBUG_TX,
   7895 		    ("%s: TX: finished transmitting packet, job %d\n",
   7896 		    device_xname(sc->sc_dev), txq->txq_snext));
   7897 
   7898 		/* Advance the tx pointer. */
   7899 		txq->txq_free -= txs->txs_ndesc;
   7900 		txq->txq_next = nexttx;
   7901 
   7902 		txq->txq_sfree--;
   7903 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7904 
   7905 		/* Pass the packet to any BPF listeners. */
   7906 		bpf_mtap(ifp, m0);
   7907 	}
   7908 
   7909 	if (m0 != NULL) {
   7910 		if (!is_transmit)
   7911 			ifp->if_flags |= IFF_OACTIVE;
   7912 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7913 		WM_Q_EVCNT_INCR(txq, txdrop);
   7914 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7915 			__func__));
   7916 		m_freem(m0);
   7917 	}
   7918 
   7919 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7920 		/* No more slots; notify upper layer. */
   7921 		if (!is_transmit)
   7922 			ifp->if_flags |= IFF_OACTIVE;
   7923 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7924 	}
   7925 
   7926 	if (sent) {
   7927 		/* Set a watchdog timer in case the chip flakes out. */
   7928 		ifp->if_timer = 5;
   7929 	}
   7930 }
   7931 
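         /*
          * wm_deferred_start_locked:
          *
          *	Restart transmission from softint context.  Called with txq_lock
          *	held; the caller releases it.
          */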
   7932 static void
   7933 wm_deferred_start_locked(struct wm_txqueue *txq)
   7934 {
   7935 	struct wm_softc *sc = txq->txq_sc;
   7936 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7937 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7938 	int qid = wmq->wmq_id;
   7939 
   7940 	KASSERT(mutex_owned(txq->txq_lock));
   7941 
    7942 	if (txq->txq_stopping) {
    7943 		/* Caller holds txq_lock and will release it; don't unlock. */
    7944 		return;
    7945 	}
   7946 
   7947 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7948 		/* XXX needed for ALTQ or single-CPU systems */
   7949 		if (qid == 0)
   7950 			wm_nq_start_locked(ifp);
   7951 		wm_nq_transmit_locked(ifp, txq);
   7952 	} else {
    7953 		/* XXX needed for ALTQ or single-CPU systems */
   7954 		if (qid == 0)
   7955 			wm_start_locked(ifp);
   7956 		wm_transmit_locked(ifp, txq);
   7957 	}
   7958 }
   7959 
   7960 /* Interrupt */
   7961 
   7962 /*
   7963  * wm_txeof:
   7964  *
   7965  *	Helper; handle transmit interrupts.
   7966  */
   7967 static int
   7968 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7969 {
   7970 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7971 	struct wm_txsoft *txs;
   7972 	bool processed = false;
   7973 	int count = 0;
   7974 	int i;
   7975 	uint8_t status;
   7976 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7977 
   7978 	KASSERT(mutex_owned(txq->txq_lock));
   7979 
   7980 	if (txq->txq_stopping)
   7981 		return 0;
   7982 
   7983 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7984 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7985 	if (wmq->wmq_id == 0)
   7986 		ifp->if_flags &= ~IFF_OACTIVE;
   7987 
   7988 	/*
   7989 	 * Go through the Tx list and free mbufs for those
   7990 	 * frames which have been transmitted.
   7991 	 */
   7992 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7993 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7994 		txs = &txq->txq_soft[i];
   7995 
   7996 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7997 			device_xname(sc->sc_dev), i));
   7998 
   7999 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8000 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8001 
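         		/*
         		 * The chip sets the DD (descriptor done) bit in the last
         		 * descriptor of a job once the whole packet has been
         		 * fetched.  If it isn't set yet, hand the descriptor back
         		 * to the chip and stop scanning.
         		 */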
   8002 		status =
   8003 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8004 		if ((status & WTX_ST_DD) == 0) {
   8005 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8006 			    BUS_DMASYNC_PREREAD);
   8007 			break;
   8008 		}
   8009 
   8010 		processed = true;
   8011 		count++;
   8012 		DPRINTF(WM_DEBUG_TX,
   8013 		    ("%s: TX: job %d done: descs %d..%d\n",
   8014 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8015 		    txs->txs_lastdesc));
   8016 
   8017 		/*
   8018 		 * XXX We should probably be using the statistics
   8019 		 * XXX registers, but I don't know if they exist
   8020 		 * XXX on chips before the i82544.
   8021 		 */
   8022 
   8023 #ifdef WM_EVENT_COUNTERS
   8024 		if (status & WTX_ST_TU)
   8025 			WM_Q_EVCNT_INCR(txq, tu);
   8026 #endif /* WM_EVENT_COUNTERS */
   8027 
   8028 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8029 			ifp->if_oerrors++;
   8030 			if (status & WTX_ST_LC)
   8031 				log(LOG_WARNING, "%s: late collision\n",
   8032 				    device_xname(sc->sc_dev));
   8033 			else if (status & WTX_ST_EC) {
   8034 				ifp->if_collisions += 16;
   8035 				log(LOG_WARNING, "%s: excessive collisions\n",
   8036 				    device_xname(sc->sc_dev));
   8037 			}
   8038 		} else
   8039 			ifp->if_opackets++;
   8040 
   8041 		txq->txq_packets++;
   8042 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8043 
   8044 		txq->txq_free += txs->txs_ndesc;
   8045 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8046 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8047 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8048 		m_freem(txs->txs_mbuf);
   8049 		txs->txs_mbuf = NULL;
   8050 	}
   8051 
   8052 	/* Update the dirty transmit buffer pointer. */
   8053 	txq->txq_sdirty = i;
   8054 	DPRINTF(WM_DEBUG_TX,
   8055 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8056 
   8057 	if (count != 0)
   8058 		rnd_add_uint32(&sc->rnd_source, count);
   8059 
   8060 	/*
   8061 	 * If there are no more pending transmissions, cancel the watchdog
   8062 	 * timer.
   8063 	 */
   8064 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8065 		ifp->if_timer = 0;
   8066 
   8067 	return processed;
   8068 }
   8069 
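         /*
          * The following inline helpers hide the three receive descriptor
          * layouts: the 82574 uses extended descriptors, NEWQUEUE (82575 and
          * newer) devices use "nq" descriptors, and everything else uses the
          * legacy layout.
          */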
   8070 static inline uint32_t
   8071 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8072 {
   8073 	struct wm_softc *sc = rxq->rxq_sc;
   8074 
   8075 	if (sc->sc_type == WM_T_82574)
   8076 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8077 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8078 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8079 	else
   8080 		return rxq->rxq_descs[idx].wrx_status;
   8081 }
   8082 
   8083 static inline uint32_t
   8084 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8085 {
   8086 	struct wm_softc *sc = rxq->rxq_sc;
   8087 
   8088 	if (sc->sc_type == WM_T_82574)
   8089 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8090 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8091 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8092 	else
   8093 		return rxq->rxq_descs[idx].wrx_errors;
   8094 }
   8095 
   8096 static inline uint16_t
   8097 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8098 {
   8099 	struct wm_softc *sc = rxq->rxq_sc;
   8100 
   8101 	if (sc->sc_type == WM_T_82574)
   8102 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8103 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8104 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8105 	else
   8106 		return rxq->rxq_descs[idx].wrx_special;
   8107 }
   8108 
   8109 static inline int
   8110 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8111 {
   8112 	struct wm_softc *sc = rxq->rxq_sc;
   8113 
   8114 	if (sc->sc_type == WM_T_82574)
   8115 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8116 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8117 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8118 	else
   8119 		return rxq->rxq_descs[idx].wrx_len;
   8120 }
   8121 
   8122 #ifdef WM_DEBUG
   8123 static inline uint32_t
   8124 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8125 {
   8126 	struct wm_softc *sc = rxq->rxq_sc;
   8127 
   8128 	if (sc->sc_type == WM_T_82574)
   8129 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8130 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8131 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8132 	else
   8133 		return 0;
   8134 }
   8135 
   8136 static inline uint8_t
   8137 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8138 {
   8139 	struct wm_softc *sc = rxq->rxq_sc;
   8140 
   8141 	if (sc->sc_type == WM_T_82574)
   8142 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8143 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8144 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8145 	else
   8146 		return 0;
   8147 }
   8148 #endif /* WM_DEBUG */
   8149 
   8150 static inline bool
   8151 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8152     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8153 {
   8154 
   8155 	if (sc->sc_type == WM_T_82574)
   8156 		return (status & ext_bit) != 0;
   8157 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8158 		return (status & nq_bit) != 0;
   8159 	else
   8160 		return (status & legacy_bit) != 0;
   8161 }
   8162 
   8163 static inline bool
   8164 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8165     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8166 {
   8167 
   8168 	if (sc->sc_type == WM_T_82574)
   8169 		return (error & ext_bit) != 0;
   8170 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8171 		return (error & nq_bit) != 0;
   8172 	else
   8173 		return (error & legacy_bit) != 0;
   8174 }
   8175 
   8176 static inline bool
   8177 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8178 {
   8179 
   8180 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8181 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8182 		return true;
   8183 	else
   8184 		return false;
   8185 }
   8186 
   8187 static inline bool
   8188 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8189 {
   8190 	struct wm_softc *sc = rxq->rxq_sc;
   8191 
   8192 	/* XXXX missing error bit for newqueue? */
   8193 	if (wm_rxdesc_is_set_error(sc, errors,
   8194 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8195 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8196 		NQRXC_ERROR_RXE)) {
   8197 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8198 			log(LOG_WARNING, "%s: symbol error\n",
   8199 			    device_xname(sc->sc_dev));
   8200 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8201 			log(LOG_WARNING, "%s: receive sequence error\n",
   8202 			    device_xname(sc->sc_dev));
   8203 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8204 			log(LOG_WARNING, "%s: CRC error\n",
   8205 			    device_xname(sc->sc_dev));
   8206 		return true;
   8207 	}
   8208 
   8209 	return false;
   8210 }
   8211 
   8212 static inline bool
   8213 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8214 {
   8215 	struct wm_softc *sc = rxq->rxq_sc;
   8216 
   8217 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8218 		NQRXC_STATUS_DD)) {
   8219 		/* We have processed all of the receive descriptors. */
   8220 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8221 		return false;
   8222 	}
   8223 
   8224 	return true;
   8225 }
   8226 
   8227 static inline bool
   8228 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8229     struct mbuf *m)
   8230 {
   8231 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8232 
   8233 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8234 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8235 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8236 	}
   8237 
   8238 	return true;
   8239 }
   8240 
   8241 static inline void
   8242 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8243     uint32_t errors, struct mbuf *m)
   8244 {
   8245 	struct wm_softc *sc = rxq->rxq_sc;
   8246 
   8247 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8248 		if (wm_rxdesc_is_set_status(sc, status,
   8249 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8250 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8251 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8252 			if (wm_rxdesc_is_set_error(sc, errors,
   8253 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8254 				m->m_pkthdr.csum_flags |=
   8255 					M_CSUM_IPv4_BAD;
   8256 		}
   8257 		if (wm_rxdesc_is_set_status(sc, status,
   8258 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8259 			/*
   8260 			 * Note: we don't know if this was TCP or UDP,
   8261 			 * so we just set both bits, and expect the
   8262 			 * upper layers to deal.
   8263 			 */
   8264 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8265 			m->m_pkthdr.csum_flags |=
   8266 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8267 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8268 			if (wm_rxdesc_is_set_error(sc, errors,
   8269 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8270 				m->m_pkthdr.csum_flags |=
   8271 					M_CSUM_TCP_UDP_BAD;
   8272 		}
   8273 	}
   8274 }
   8275 
   8276 /*
   8277  * wm_rxeof:
   8278  *
   8279  *	Helper; handle receive interrupts.
   8280  */
   8281 static void
   8282 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8283 {
   8284 	struct wm_softc *sc = rxq->rxq_sc;
   8285 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8286 	struct wm_rxsoft *rxs;
   8287 	struct mbuf *m;
   8288 	int i, len;
   8289 	int count = 0;
   8290 	uint32_t status, errors;
   8291 	uint16_t vlantag;
   8292 
   8293 	KASSERT(mutex_owned(rxq->rxq_lock));
   8294 
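         	/*
         	 * A received packet may span several descriptors; the fragments
         	 * are chained on rxq_head/rxq_tail until a descriptor with the
         	 * EOP bit set is seen.
         	 */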
   8295 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8296 		if (limit-- == 0) {
   8297 			rxq->rxq_ptr = i;
   8298 			break;
   8299 		}
   8300 
   8301 		rxs = &rxq->rxq_soft[i];
   8302 
   8303 		DPRINTF(WM_DEBUG_RX,
   8304 		    ("%s: RX: checking descriptor %d\n",
   8305 		    device_xname(sc->sc_dev), i));
    8306 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8307 
   8308 		status = wm_rxdesc_get_status(rxq, i);
   8309 		errors = wm_rxdesc_get_errors(rxq, i);
   8310 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8311 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8312 #ifdef WM_DEBUG
   8313 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8314 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8315 #endif
   8316 
   8317 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8318 			/*
   8319 			 * Update the receive pointer holding rxq_lock
   8320 			 * consistent with increment counter.
   8321 			 */
   8322 			rxq->rxq_ptr = i;
   8323 			break;
   8324 		}
   8325 
   8326 		count++;
   8327 		if (__predict_false(rxq->rxq_discard)) {
   8328 			DPRINTF(WM_DEBUG_RX,
   8329 			    ("%s: RX: discarding contents of descriptor %d\n",
   8330 			    device_xname(sc->sc_dev), i));
   8331 			wm_init_rxdesc(rxq, i);
   8332 			if (wm_rxdesc_is_eop(rxq, status)) {
   8333 				/* Reset our state. */
   8334 				DPRINTF(WM_DEBUG_RX,
   8335 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8336 				    device_xname(sc->sc_dev)));
   8337 				rxq->rxq_discard = 0;
   8338 			}
   8339 			continue;
   8340 		}
   8341 
   8342 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8343 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8344 
   8345 		m = rxs->rxs_mbuf;
   8346 
   8347 		/*
   8348 		 * Add a new receive buffer to the ring, unless of
   8349 		 * course the length is zero. Treat the latter as a
   8350 		 * failed mapping.
   8351 		 */
   8352 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8353 			/*
   8354 			 * Failed, throw away what we've done so
   8355 			 * far, and discard the rest of the packet.
   8356 			 */
   8357 			ifp->if_ierrors++;
   8358 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8359 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8360 			wm_init_rxdesc(rxq, i);
   8361 			if (!wm_rxdesc_is_eop(rxq, status))
   8362 				rxq->rxq_discard = 1;
   8363 			if (rxq->rxq_head != NULL)
   8364 				m_freem(rxq->rxq_head);
   8365 			WM_RXCHAIN_RESET(rxq);
   8366 			DPRINTF(WM_DEBUG_RX,
   8367 			    ("%s: RX: Rx buffer allocation failed, "
   8368 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8369 			    rxq->rxq_discard ? " (discard)" : ""));
   8370 			continue;
   8371 		}
   8372 
   8373 		m->m_len = len;
   8374 		rxq->rxq_len += len;
   8375 		DPRINTF(WM_DEBUG_RX,
   8376 		    ("%s: RX: buffer at %p len %d\n",
   8377 		    device_xname(sc->sc_dev), m->m_data, len));
   8378 
   8379 		/* If this is not the end of the packet, keep looking. */
   8380 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8381 			WM_RXCHAIN_LINK(rxq, m);
   8382 			DPRINTF(WM_DEBUG_RX,
   8383 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8384 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8385 			continue;
   8386 		}
   8387 
    8388 		/*
    8389 		 * Okay, we have the entire packet now.  The chip is
    8390 		 * configured to include the FCS except on I350 and I21[01]
    8391 		 * (not all chips can be configured to strip it),
    8392 		 * so we need to trim it.
    8393 		 * We may need to adjust the length of the previous mbuf
    8394 		 * in the chain if the current mbuf is too short.
    8395 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8396 		 * register is always set on I350, so we don't trim there.
    8397 		 */
   8398 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8399 		    && (sc->sc_type != WM_T_I210)
   8400 		    && (sc->sc_type != WM_T_I211)) {
   8401 			if (m->m_len < ETHER_CRC_LEN) {
   8402 				rxq->rxq_tail->m_len
   8403 				    -= (ETHER_CRC_LEN - m->m_len);
   8404 				m->m_len = 0;
   8405 			} else
   8406 				m->m_len -= ETHER_CRC_LEN;
   8407 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8408 		} else
   8409 			len = rxq->rxq_len;
   8410 
   8411 		WM_RXCHAIN_LINK(rxq, m);
   8412 
   8413 		*rxq->rxq_tailp = NULL;
   8414 		m = rxq->rxq_head;
   8415 
   8416 		WM_RXCHAIN_RESET(rxq);
   8417 
   8418 		DPRINTF(WM_DEBUG_RX,
   8419 		    ("%s: RX: have entire packet, len -> %d\n",
   8420 		    device_xname(sc->sc_dev), len));
   8421 
   8422 		/* If an error occurred, update stats and drop the packet. */
   8423 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8424 			m_freem(m);
   8425 			continue;
   8426 		}
   8427 
   8428 		/* No errors.  Receive the packet. */
   8429 		m_set_rcvif(m, ifp);
   8430 		m->m_pkthdr.len = len;
   8431 		/*
   8432 		 * TODO
    8433 		 * We should save the rsshash and rsstype in this mbuf.
   8434 		 */
   8435 		DPRINTF(WM_DEBUG_RX,
   8436 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8437 			device_xname(sc->sc_dev), rsstype, rsshash));
   8438 
   8439 		/*
   8440 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8441 		 * for us.  Associate the tag with the packet.
   8442 		 */
   8443 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8444 			continue;
   8445 
   8446 		/* Set up checksum info for this packet. */
   8447 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8448 		/*
   8449 		 * Update the receive pointer holding rxq_lock consistent with
   8450 		 * increment counter.
   8451 		 */
   8452 		rxq->rxq_ptr = i;
   8453 		rxq->rxq_packets++;
   8454 		rxq->rxq_bytes += len;
   8455 		mutex_exit(rxq->rxq_lock);
   8456 
   8457 		/* Pass it on. */
   8458 		if_percpuq_enqueue(sc->sc_ipq, m);
   8459 
   8460 		mutex_enter(rxq->rxq_lock);
   8461 
   8462 		if (rxq->rxq_stopping)
   8463 			break;
   8464 	}
   8465 
   8466 	if (count != 0)
   8467 		rnd_add_uint32(&sc->rnd_source, count);
   8468 
   8469 	DPRINTF(WM_DEBUG_RX,
   8470 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8471 }
   8472 
   8473 /*
   8474  * wm_linkintr_gmii:
   8475  *
   8476  *	Helper; handle link interrupts for GMII.
   8477  */
   8478 static void
   8479 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8480 {
   8481 
   8482 	KASSERT(WM_CORE_LOCKED(sc));
   8483 
   8484 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8485 		__func__));
   8486 
   8487 	if (icr & ICR_LSC) {
   8488 		uint32_t reg;
   8489 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8490 
   8491 		if ((status & STATUS_LU) != 0) {
   8492 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8493 				device_xname(sc->sc_dev),
   8494 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8495 		} else {
   8496 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8497 				device_xname(sc->sc_dev)));
   8498 		}
   8499 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8500 			wm_gig_downshift_workaround_ich8lan(sc);
   8501 
   8502 		if ((sc->sc_type == WM_T_ICH8)
   8503 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8504 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8505 		}
   8506 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8507 			device_xname(sc->sc_dev)));
   8508 		mii_pollstat(&sc->sc_mii);
   8509 		if (sc->sc_type == WM_T_82543) {
   8510 			int miistatus, active;
   8511 
   8512 			/*
   8513 			 * With 82543, we need to force speed and
   8514 			 * duplex on the MAC equal to what the PHY
   8515 			 * speed and duplex configuration is.
   8516 			 */
   8517 			miistatus = sc->sc_mii.mii_media_status;
   8518 
   8519 			if (miistatus & IFM_ACTIVE) {
   8520 				active = sc->sc_mii.mii_media_active;
   8521 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8522 				switch (IFM_SUBTYPE(active)) {
   8523 				case IFM_10_T:
   8524 					sc->sc_ctrl |= CTRL_SPEED_10;
   8525 					break;
   8526 				case IFM_100_TX:
   8527 					sc->sc_ctrl |= CTRL_SPEED_100;
   8528 					break;
   8529 				case IFM_1000_T:
   8530 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8531 					break;
   8532 				default:
   8533 					/*
   8534 					 * fiber?
    8535 					 * Should not enter here.
   8536 					 */
   8537 					printf("unknown media (%x)\n", active);
   8538 					break;
   8539 				}
   8540 				if (active & IFM_FDX)
   8541 					sc->sc_ctrl |= CTRL_FD;
   8542 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8543 			}
   8544 		} else if (sc->sc_type == WM_T_PCH) {
   8545 			wm_k1_gig_workaround_hv(sc,
   8546 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8547 		}
   8548 
   8549 		if ((sc->sc_phytype == WMPHY_82578)
   8550 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8551 			== IFM_1000_T)) {
   8552 
   8553 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8554 				delay(200*1000); /* XXX too big */
   8555 
   8556 				/* Link stall fix for link up */
   8557 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8558 				    HV_MUX_DATA_CTRL,
   8559 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8560 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8561 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8562 				    HV_MUX_DATA_CTRL,
   8563 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8564 			}
   8565 		}
   8566 		/*
   8567 		 * I217 Packet Loss issue:
   8568 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8569 		 * on power up.
   8570 		 * Set the Beacon Duration for I217 to 8 usec
   8571 		 */
   8572 		if ((sc->sc_type == WM_T_PCH_LPT)
   8573 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8574 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8575 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8576 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8577 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8578 		}
   8579 
   8580 		/* XXX Work-around I218 hang issue */
   8581 		/* e1000_k1_workaround_lpt_lp() */
   8582 
   8583 		if ((sc->sc_type == WM_T_PCH_LPT)
   8584 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8585 			/*
   8586 			 * Set platform power management values for Latency
   8587 			 * Tolerance Reporting (LTR)
   8588 			 */
   8589 			wm_platform_pm_pch_lpt(sc,
   8590 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8591 				    != 0));
   8592 		}
   8593 
   8594 		/* FEXTNVM6 K1-off workaround */
   8595 		if (sc->sc_type == WM_T_PCH_SPT) {
   8596 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8597 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8598 			    & FEXTNVM6_K1_OFF_ENABLE)
   8599 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8600 			else
   8601 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8602 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8603 		}
   8604 	} else if (icr & ICR_RXSEQ) {
    8605 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8606 			device_xname(sc->sc_dev)));
   8607 	}
   8608 }
   8609 
   8610 /*
   8611  * wm_linkintr_tbi:
   8612  *
   8613  *	Helper; handle link interrupts for TBI mode.
   8614  */
   8615 static void
   8616 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8617 {
   8618 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8619 	uint32_t status;
   8620 
   8621 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8622 		__func__));
   8623 
   8624 	status = CSR_READ(sc, WMREG_STATUS);
   8625 	if (icr & ICR_LSC) {
   8626 		if (status & STATUS_LU) {
   8627 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8628 			    device_xname(sc->sc_dev),
   8629 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8630 			/*
   8631 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8632 			 * so we should update sc->sc_ctrl
   8633 			 */
   8634 
   8635 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8636 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8637 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8638 			if (status & STATUS_FD)
   8639 				sc->sc_tctl |=
   8640 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8641 			else
   8642 				sc->sc_tctl |=
   8643 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8644 			if (sc->sc_ctrl & CTRL_TFCE)
   8645 				sc->sc_fcrtl |= FCRTL_XONE;
   8646 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8647 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8648 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8649 				      sc->sc_fcrtl);
   8650 			sc->sc_tbi_linkup = 1;
   8651 			if_link_state_change(ifp, LINK_STATE_UP);
   8652 		} else {
   8653 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8654 			    device_xname(sc->sc_dev)));
   8655 			sc->sc_tbi_linkup = 0;
   8656 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8657 		}
   8658 		/* Update LED */
   8659 		wm_tbi_serdes_set_linkled(sc);
   8660 	} else if (icr & ICR_RXSEQ) {
   8661 		DPRINTF(WM_DEBUG_LINK,
   8662 		    ("%s: LINK: Receive sequence error\n",
   8663 		    device_xname(sc->sc_dev)));
   8664 	}
   8665 }
   8666 
   8667 /*
   8668  * wm_linkintr_serdes:
   8669  *
    8670  *	Helper; handle link interrupts for SERDES mode.
   8671  */
   8672 static void
   8673 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8674 {
   8675 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8676 	struct mii_data *mii = &sc->sc_mii;
   8677 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8678 	uint32_t pcs_adv, pcs_lpab, reg;
   8679 
   8680 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8681 		__func__));
   8682 
   8683 	if (icr & ICR_LSC) {
   8684 		/* Check PCS */
   8685 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8686 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8687 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8688 				device_xname(sc->sc_dev)));
   8689 			mii->mii_media_status |= IFM_ACTIVE;
   8690 			sc->sc_tbi_linkup = 1;
   8691 			if_link_state_change(ifp, LINK_STATE_UP);
   8692 		} else {
   8693 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8694 				device_xname(sc->sc_dev)));
   8695 			mii->mii_media_status |= IFM_NONE;
   8696 			sc->sc_tbi_linkup = 0;
   8697 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8698 			wm_tbi_serdes_set_linkled(sc);
   8699 			return;
   8700 		}
   8701 		mii->mii_media_active |= IFM_1000_SX;
   8702 		if ((reg & PCS_LSTS_FDX) != 0)
   8703 			mii->mii_media_active |= IFM_FDX;
   8704 		else
   8705 			mii->mii_media_active |= IFM_HDX;
   8706 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8707 			/* Check flow */
   8708 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8709 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8710 				DPRINTF(WM_DEBUG_LINK,
   8711 				    ("XXX LINKOK but not ACOMP\n"));
   8712 				return;
   8713 			}
   8714 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8715 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8716 			DPRINTF(WM_DEBUG_LINK,
   8717 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
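         			/*
         			 * Resolve the flow control settings from our
         			 * advertised abilities and the link partner's,
         			 * following the symmetric/asymmetric pause rules
         			 * of IEEE 802.3 Annex 28B.
         			 */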
   8718 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8719 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8720 				mii->mii_media_active |= IFM_FLOW
   8721 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8722 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8723 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8724 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8725 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8726 				mii->mii_media_active |= IFM_FLOW
   8727 				    | IFM_ETH_TXPAUSE;
   8728 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8729 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8730 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8731 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8732 				mii->mii_media_active |= IFM_FLOW
   8733 				    | IFM_ETH_RXPAUSE;
   8734 		}
   8735 		/* Update LED */
   8736 		wm_tbi_serdes_set_linkled(sc);
   8737 	} else {
   8738 		DPRINTF(WM_DEBUG_LINK,
   8739 		    ("%s: LINK: Receive sequence error\n",
   8740 		    device_xname(sc->sc_dev)));
   8741 	}
   8742 }
   8743 
   8744 /*
   8745  * wm_linkintr:
   8746  *
   8747  *	Helper; handle link interrupts.
   8748  */
   8749 static void
   8750 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8751 {
   8752 
   8753 	KASSERT(WM_CORE_LOCKED(sc));
   8754 
   8755 	if (sc->sc_flags & WM_F_HAS_MII)
   8756 		wm_linkintr_gmii(sc, icr);
   8757 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8758 	    && (sc->sc_type >= WM_T_82575))
   8759 		wm_linkintr_serdes(sc, icr);
   8760 	else
   8761 		wm_linkintr_tbi(sc, icr);
   8762 }
   8763 
   8764 /*
   8765  * wm_intr_legacy:
   8766  *
   8767  *	Interrupt service routine for INTx and MSI.
   8768  */
   8769 static int
   8770 wm_intr_legacy(void *arg)
   8771 {
   8772 	struct wm_softc *sc = arg;
   8773 	struct wm_queue *wmq = &sc->sc_queue[0];
   8774 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8775 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8776 	uint32_t icr, rndval = 0;
   8777 	int handled = 0;
   8778 
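         	/*
         	 * Note: reading ICR acknowledges (clears) the asserted interrupt
         	 * causes, so keep looping until no cause we care about remains.
         	 */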
   8779 	while (1 /* CONSTCOND */) {
   8780 		icr = CSR_READ(sc, WMREG_ICR);
   8781 		if ((icr & sc->sc_icr) == 0)
   8782 			break;
   8783 		if (handled == 0) {
   8784 			DPRINTF(WM_DEBUG_TX,
    8785 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8786 		}
   8787 		if (rndval == 0)
   8788 			rndval = icr;
   8789 
   8790 		mutex_enter(rxq->rxq_lock);
   8791 
   8792 		if (rxq->rxq_stopping) {
   8793 			mutex_exit(rxq->rxq_lock);
   8794 			break;
   8795 		}
   8796 
   8797 		handled = 1;
   8798 
   8799 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8800 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8801 			DPRINTF(WM_DEBUG_RX,
   8802 			    ("%s: RX: got Rx intr 0x%08x\n",
   8803 			    device_xname(sc->sc_dev),
   8804 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8805 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8806 		}
   8807 #endif
   8808 		/*
    8809 		 * wm_rxeof() does *not* call upper layer functions directly;
    8810 		 * if_percpuq_enqueue() just calls softint_schedule().
    8811 		 * So we can call wm_rxeof() in interrupt context.
   8812 		 */
   8813 		wm_rxeof(rxq, UINT_MAX);
   8814 
   8815 		mutex_exit(rxq->rxq_lock);
   8816 		mutex_enter(txq->txq_lock);
   8817 
   8818 		if (txq->txq_stopping) {
   8819 			mutex_exit(txq->txq_lock);
   8820 			break;
   8821 		}
   8822 
   8823 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8824 		if (icr & ICR_TXDW) {
   8825 			DPRINTF(WM_DEBUG_TX,
   8826 			    ("%s: TX: got TXDW interrupt\n",
   8827 			    device_xname(sc->sc_dev)));
   8828 			WM_Q_EVCNT_INCR(txq, txdw);
   8829 		}
   8830 #endif
   8831 		wm_txeof(sc, txq);
   8832 
   8833 		mutex_exit(txq->txq_lock);
   8834 		WM_CORE_LOCK(sc);
   8835 
   8836 		if (sc->sc_core_stopping) {
   8837 			WM_CORE_UNLOCK(sc);
   8838 			break;
   8839 		}
   8840 
   8841 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8842 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8843 			wm_linkintr(sc, icr);
   8844 		}
   8845 
   8846 		WM_CORE_UNLOCK(sc);
   8847 
   8848 		if (icr & ICR_RXO) {
   8849 #if defined(WM_DEBUG)
   8850 			log(LOG_WARNING, "%s: Receive overrun\n",
   8851 			    device_xname(sc->sc_dev));
   8852 #endif /* defined(WM_DEBUG) */
   8853 		}
   8854 	}
   8855 
   8856 	rnd_add_uint32(&sc->rnd_source, rndval);
   8857 
   8858 	if (handled) {
   8859 		/* Try to get more packets going. */
   8860 		softint_schedule(wmq->wmq_si);
   8861 	}
   8862 
   8863 	return handled;
   8864 }
   8865 
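         /*
          * For MSI-X, the per-queue interrupt is masked while the handler and
          * the deferred wm_handle_queue() softint run, and is unmasked again
          * (with the interrupt throttling rate recalculated) once the rings
          * have been serviced.
          */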
   8866 static inline void
   8867 wm_txrxintr_disable(struct wm_queue *wmq)
   8868 {
   8869 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8870 
   8871 	if (sc->sc_type == WM_T_82574)
   8872 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8873 	else if (sc->sc_type == WM_T_82575)
   8874 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8875 	else
   8876 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8877 }
   8878 
   8879 static inline void
   8880 wm_txrxintr_enable(struct wm_queue *wmq)
   8881 {
   8882 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8883 
   8884 	wm_itrs_calculate(sc, wmq);
   8885 
   8886 	if (sc->sc_type == WM_T_82574)
   8887 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8888 	else if (sc->sc_type == WM_T_82575)
   8889 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8890 	else
   8891 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8892 }
   8893 
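         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for the Tx/Rx queue pair for MSI-X.
          */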
   8894 static int
   8895 wm_txrxintr_msix(void *arg)
   8896 {
   8897 	struct wm_queue *wmq = arg;
   8898 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8899 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8900 	struct wm_softc *sc = txq->txq_sc;
   8901 	u_int limit = sc->sc_rx_intr_process_limit;
   8902 
   8903 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8904 
   8905 	DPRINTF(WM_DEBUG_TX,
   8906 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8907 
   8908 	wm_txrxintr_disable(wmq);
   8909 
   8910 	mutex_enter(txq->txq_lock);
   8911 
   8912 	if (txq->txq_stopping) {
   8913 		mutex_exit(txq->txq_lock);
   8914 		return 0;
   8915 	}
   8916 
   8917 	WM_Q_EVCNT_INCR(txq, txdw);
   8918 	wm_txeof(sc, txq);
    8919 	/* wm_deferred_start() is done in wm_handle_queue(). */
   8920 	mutex_exit(txq->txq_lock);
   8921 
   8922 	DPRINTF(WM_DEBUG_RX,
   8923 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8924 	mutex_enter(rxq->rxq_lock);
   8925 
   8926 	if (rxq->rxq_stopping) {
   8927 		mutex_exit(rxq->rxq_lock);
   8928 		return 0;
   8929 	}
   8930 
   8931 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8932 	wm_rxeof(rxq, limit);
   8933 	mutex_exit(rxq->rxq_lock);
   8934 
   8935 	wm_itrs_writereg(sc, wmq);
   8936 
   8937 	softint_schedule(wmq->wmq_si);
   8938 
   8939 	return 1;
   8940 }
   8941 
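         /*
          * wm_handle_queue:
          *
          *	Softint handler; finish Tx completions, restart transmission and
          *	process more received packets, then re-enable the queue interrupt.
          */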
   8942 static void
   8943 wm_handle_queue(void *arg)
   8944 {
   8945 	struct wm_queue *wmq = arg;
   8946 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8947 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8948 	struct wm_softc *sc = txq->txq_sc;
   8949 	u_int limit = sc->sc_rx_process_limit;
   8950 
   8951 	mutex_enter(txq->txq_lock);
   8952 	if (txq->txq_stopping) {
   8953 		mutex_exit(txq->txq_lock);
   8954 		return;
   8955 	}
   8956 	wm_txeof(sc, txq);
   8957 	wm_deferred_start_locked(txq);
   8958 	mutex_exit(txq->txq_lock);
   8959 
   8960 	mutex_enter(rxq->rxq_lock);
   8961 	if (rxq->rxq_stopping) {
   8962 		mutex_exit(rxq->rxq_lock);
   8963 		return;
   8964 	}
   8965 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8966 	wm_rxeof(rxq, limit);
   8967 	mutex_exit(rxq->rxq_lock);
   8968 
   8969 	wm_txrxintr_enable(wmq);
   8970 }
   8971 
   8972 /*
   8973  * wm_linkintr_msix:
   8974  *
   8975  *	Interrupt service routine for link status change for MSI-X.
   8976  */
   8977 static int
   8978 wm_linkintr_msix(void *arg)
   8979 {
   8980 	struct wm_softc *sc = arg;
   8981 	uint32_t reg;
   8982 
   8983 	DPRINTF(WM_DEBUG_LINK,
   8984 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8985 
   8986 	reg = CSR_READ(sc, WMREG_ICR);
   8987 	WM_CORE_LOCK(sc);
   8988 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8989 		goto out;
   8990 
   8991 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8992 	wm_linkintr(sc, ICR_LSC);
   8993 
   8994 out:
   8995 	WM_CORE_UNLOCK(sc);
   8996 
   8997 	if (sc->sc_type == WM_T_82574)
   8998 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8999 	else if (sc->sc_type == WM_T_82575)
   9000 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9001 	else
   9002 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9003 
   9004 	return 1;
   9005 }
   9006 
   9007 /*
   9008  * Media related.
   9009  * GMII, SGMII, TBI (and SERDES)
   9010  */
   9011 
   9012 /* Common */
   9013 
   9014 /*
   9015  * wm_tbi_serdes_set_linkled:
   9016  *
   9017  *	Update the link LED on TBI and SERDES devices.
   9018  */
   9019 static void
   9020 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9021 {
   9022 
   9023 	if (sc->sc_tbi_linkup)
   9024 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9025 	else
   9026 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9027 
   9028 	/* 82540 or newer devices are active low */
   9029 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9030 
   9031 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9032 }
   9033 
   9034 /* GMII related */
   9035 
   9036 /*
   9037  * wm_gmii_reset:
   9038  *
   9039  *	Reset the PHY.
   9040  */
   9041 static void
   9042 wm_gmii_reset(struct wm_softc *sc)
   9043 {
   9044 	uint32_t reg;
   9045 	int rv;
   9046 
   9047 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9048 		device_xname(sc->sc_dev), __func__));
   9049 
   9050 	rv = sc->phy.acquire(sc);
   9051 	if (rv != 0) {
   9052 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9053 		    __func__);
   9054 		return;
   9055 	}
   9056 
   9057 	switch (sc->sc_type) {
   9058 	case WM_T_82542_2_0:
   9059 	case WM_T_82542_2_1:
   9060 		/* null */
   9061 		break;
   9062 	case WM_T_82543:
   9063 		/*
   9064 		 * With 82543, we need to force speed and duplex on the MAC
   9065 		 * equal to what the PHY speed and duplex configuration is.
   9066 		 * In addition, we need to perform a hardware reset on the PHY
   9067 		 * to take it out of reset.
   9068 		 */
   9069 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9070 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9071 
   9072 		/* The PHY reset pin is active-low. */
   9073 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9074 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9075 		    CTRL_EXT_SWDPIN(4));
   9076 		reg |= CTRL_EXT_SWDPIO(4);
   9077 
   9078 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9079 		CSR_WRITE_FLUSH(sc);
   9080 		delay(10*1000);
   9081 
   9082 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9083 		CSR_WRITE_FLUSH(sc);
   9084 		delay(150);
   9085 #if 0
   9086 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9087 #endif
   9088 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9089 		break;
   9090 	case WM_T_82544:	/* reset 10000us */
   9091 	case WM_T_82540:
   9092 	case WM_T_82545:
   9093 	case WM_T_82545_3:
   9094 	case WM_T_82546:
   9095 	case WM_T_82546_3:
   9096 	case WM_T_82541:
   9097 	case WM_T_82541_2:
   9098 	case WM_T_82547:
   9099 	case WM_T_82547_2:
   9100 	case WM_T_82571:	/* reset 100us */
   9101 	case WM_T_82572:
   9102 	case WM_T_82573:
   9103 	case WM_T_82574:
   9104 	case WM_T_82575:
   9105 	case WM_T_82576:
   9106 	case WM_T_82580:
   9107 	case WM_T_I350:
   9108 	case WM_T_I354:
   9109 	case WM_T_I210:
   9110 	case WM_T_I211:
   9111 	case WM_T_82583:
   9112 	case WM_T_80003:
   9113 		/* generic reset */
   9114 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9115 		CSR_WRITE_FLUSH(sc);
   9116 		delay(20000);
   9117 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9118 		CSR_WRITE_FLUSH(sc);
   9119 		delay(20000);
   9120 
   9121 		if ((sc->sc_type == WM_T_82541)
   9122 		    || (sc->sc_type == WM_T_82541_2)
   9123 		    || (sc->sc_type == WM_T_82547)
   9124 		    || (sc->sc_type == WM_T_82547_2)) {
    9125 			/* Workarounds for IGP are done in igp_reset() */
   9126 			/* XXX add code to set LED after phy reset */
   9127 		}
   9128 		break;
   9129 	case WM_T_ICH8:
   9130 	case WM_T_ICH9:
   9131 	case WM_T_ICH10:
   9132 	case WM_T_PCH:
   9133 	case WM_T_PCH2:
   9134 	case WM_T_PCH_LPT:
   9135 	case WM_T_PCH_SPT:
   9136 		/* generic reset */
   9137 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9138 		CSR_WRITE_FLUSH(sc);
   9139 		delay(100);
   9140 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9141 		CSR_WRITE_FLUSH(sc);
   9142 		delay(150);
   9143 		break;
   9144 	default:
   9145 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9146 		    __func__);
   9147 		break;
   9148 	}
   9149 
   9150 	sc->phy.release(sc);
   9151 
   9152 	/* get_cfg_done */
   9153 	wm_get_cfg_done(sc);
   9154 
   9155 	/* extra setup */
   9156 	switch (sc->sc_type) {
   9157 	case WM_T_82542_2_0:
   9158 	case WM_T_82542_2_1:
   9159 	case WM_T_82543:
   9160 	case WM_T_82544:
   9161 	case WM_T_82540:
   9162 	case WM_T_82545:
   9163 	case WM_T_82545_3:
   9164 	case WM_T_82546:
   9165 	case WM_T_82546_3:
   9166 	case WM_T_82541_2:
   9167 	case WM_T_82547_2:
   9168 	case WM_T_82571:
   9169 	case WM_T_82572:
   9170 	case WM_T_82573:
   9171 	case WM_T_82574:
   9172 	case WM_T_82583:
   9173 	case WM_T_82575:
   9174 	case WM_T_82576:
   9175 	case WM_T_82580:
   9176 	case WM_T_I350:
   9177 	case WM_T_I354:
   9178 	case WM_T_I210:
   9179 	case WM_T_I211:
   9180 	case WM_T_80003:
   9181 		/* null */
   9182 		break;
   9183 	case WM_T_82541:
   9184 	case WM_T_82547:
    9185 		/* XXX Actively configure the LED after PHY reset */
   9186 		break;
   9187 	case WM_T_ICH8:
   9188 	case WM_T_ICH9:
   9189 	case WM_T_ICH10:
   9190 	case WM_T_PCH:
   9191 	case WM_T_PCH2:
   9192 	case WM_T_PCH_LPT:
   9193 	case WM_T_PCH_SPT:
   9194 		wm_phy_post_reset(sc);
   9195 		break;
   9196 	default:
   9197 		panic("%s: unknown type\n", __func__);
   9198 		break;
   9199 	}
   9200 }
   9201 
    9202 /*
    9203  * Set up sc_phytype and mii_{read|write}reg.
    9204  *
    9205  *  To identify the PHY type, the correct read/write functions must be
    9206  * selected, and to select them the PCI ID or MAC type is needed before
    9207  * any PHY register can be accessed.
    9208  *
    9209  *  On the first call of this function, the PHY ID is not known yet.
    9210  * Check the PCI ID or MAC type instead. The list of PCI IDs may not
    9211  * be perfect, so the result might be incorrect.
    9212  *
    9213  *  On the second call, the PHY OUI and model are used to identify
    9214  * the PHY type. It might still not be perfect because some entries
    9215  * may be missing from the comparison, but it is better than the first
    9216  * call.
    9217  *
    9218  *  If the newly detected result differs from the previous assumption,
    9219  * a diagnostic message is printed.
    9220  */
   9220 static void
   9221 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9222     uint16_t phy_model)
   9223 {
   9224 	device_t dev = sc->sc_dev;
   9225 	struct mii_data *mii = &sc->sc_mii;
   9226 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9227 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9228 	mii_readreg_t new_readreg;
   9229 	mii_writereg_t new_writereg;
   9230 
   9231 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9232 		device_xname(sc->sc_dev), __func__));
   9233 
   9234 	if (mii->mii_readreg == NULL) {
   9235 		/*
   9236 		 *  This is the first call of this function. For ICH and PCH
   9237 		 * variants, it's difficult to determine the PHY access method
   9238 		 * by sc_type, so use the PCI product ID for some devices.
   9239 		 */
   9240 
   9241 		switch (sc->sc_pcidevid) {
   9242 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9243 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9244 			/* 82577 */
   9245 			new_phytype = WMPHY_82577;
   9246 			break;
   9247 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9248 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9249 			/* 82578 */
   9250 			new_phytype = WMPHY_82578;
   9251 			break;
   9252 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9253 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9254 			/* 82579 */
   9255 			new_phytype = WMPHY_82579;
   9256 			break;
   9257 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9258 		case PCI_PRODUCT_INTEL_82801I_BM:
   9259 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9260 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9261 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9262 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9263 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9264 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9265 			/* ICH8, 9, 10 with 82567 */
   9266 			new_phytype = WMPHY_BM;
   9267 			break;
   9268 		default:
   9269 			break;
   9270 		}
   9271 	} else {
   9272 		/* It's not the first call. Use PHY OUI and model */
   9273 		switch (phy_oui) {
   9274 		case MII_OUI_ATHEROS: /* XXX ??? */
   9275 			switch (phy_model) {
   9276 			case 0x0004: /* XXX */
   9277 				new_phytype = WMPHY_82578;
   9278 				break;
   9279 			default:
   9280 				break;
   9281 			}
   9282 			break;
   9283 		case MII_OUI_xxMARVELL:
   9284 			switch (phy_model) {
   9285 			case MII_MODEL_xxMARVELL_I210:
   9286 				new_phytype = WMPHY_I210;
   9287 				break;
   9288 			case MII_MODEL_xxMARVELL_E1011:
   9289 			case MII_MODEL_xxMARVELL_E1000_3:
   9290 			case MII_MODEL_xxMARVELL_E1000_5:
   9291 			case MII_MODEL_xxMARVELL_E1112:
   9292 				new_phytype = WMPHY_M88;
   9293 				break;
   9294 			case MII_MODEL_xxMARVELL_E1149:
   9295 				new_phytype = WMPHY_BM;
   9296 				break;
   9297 			case MII_MODEL_xxMARVELL_E1111:
   9298 			case MII_MODEL_xxMARVELL_I347:
   9299 			case MII_MODEL_xxMARVELL_E1512:
   9300 			case MII_MODEL_xxMARVELL_E1340M:
   9301 			case MII_MODEL_xxMARVELL_E1543:
   9302 				new_phytype = WMPHY_M88;
   9303 				break;
   9304 			case MII_MODEL_xxMARVELL_I82563:
   9305 				new_phytype = WMPHY_GG82563;
   9306 				break;
   9307 			default:
   9308 				break;
   9309 			}
   9310 			break;
   9311 		case MII_OUI_INTEL:
   9312 			switch (phy_model) {
   9313 			case MII_MODEL_INTEL_I82577:
   9314 				new_phytype = WMPHY_82577;
   9315 				break;
   9316 			case MII_MODEL_INTEL_I82579:
   9317 				new_phytype = WMPHY_82579;
   9318 				break;
   9319 			case MII_MODEL_INTEL_I217:
   9320 				new_phytype = WMPHY_I217;
   9321 				break;
   9322 			case MII_MODEL_INTEL_I82580:
   9323 			case MII_MODEL_INTEL_I350:
   9324 				new_phytype = WMPHY_82580;
   9325 				break;
   9326 			default:
   9327 				break;
   9328 			}
   9329 			break;
   9330 		case MII_OUI_yyINTEL:
   9331 			switch (phy_model) {
   9332 			case MII_MODEL_yyINTEL_I82562G:
   9333 			case MII_MODEL_yyINTEL_I82562EM:
   9334 			case MII_MODEL_yyINTEL_I82562ET:
   9335 				new_phytype = WMPHY_IFE;
   9336 				break;
   9337 			case MII_MODEL_yyINTEL_IGP01E1000:
   9338 				new_phytype = WMPHY_IGP;
   9339 				break;
   9340 			case MII_MODEL_yyINTEL_I82566:
   9341 				new_phytype = WMPHY_IGP_3;
   9342 				break;
   9343 			default:
   9344 				break;
   9345 			}
   9346 			break;
   9347 		default:
   9348 			break;
   9349 		}
   9350 		if (new_phytype == WMPHY_UNKNOWN)
   9351 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9352 			    __func__);
   9353 
   9354 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9355 		    && (sc->sc_phytype != new_phytype)) {
    9356 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9357 			    "was incorrect. PHY type from PHY ID = %u\n",
   9358 			    sc->sc_phytype, new_phytype);
   9359 		}
   9360 	}
   9361 
   9362 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9363 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9364 		/* SGMII */
   9365 		new_readreg = wm_sgmii_readreg;
   9366 		new_writereg = wm_sgmii_writereg;
    9367 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9368 		/* BM2 (phyaddr == 1) */
   9369 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9370 		    && (new_phytype != WMPHY_BM)
   9371 		    && (new_phytype != WMPHY_UNKNOWN))
   9372 			doubt_phytype = new_phytype;
   9373 		new_phytype = WMPHY_BM;
   9374 		new_readreg = wm_gmii_bm_readreg;
   9375 		new_writereg = wm_gmii_bm_writereg;
   9376 	} else if (sc->sc_type >= WM_T_PCH) {
   9377 		/* All PCH* use _hv_ */
   9378 		new_readreg = wm_gmii_hv_readreg;
   9379 		new_writereg = wm_gmii_hv_writereg;
   9380 	} else if (sc->sc_type >= WM_T_ICH8) {
   9381 		/* non-82567 ICH8, 9 and 10 */
   9382 		new_readreg = wm_gmii_i82544_readreg;
   9383 		new_writereg = wm_gmii_i82544_writereg;
   9384 	} else if (sc->sc_type >= WM_T_80003) {
   9385 		/* 80003 */
   9386 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9387 		    && (new_phytype != WMPHY_GG82563)
   9388 		    && (new_phytype != WMPHY_UNKNOWN))
   9389 			doubt_phytype = new_phytype;
   9390 		new_phytype = WMPHY_GG82563;
   9391 		new_readreg = wm_gmii_i80003_readreg;
   9392 		new_writereg = wm_gmii_i80003_writereg;
   9393 	} else if (sc->sc_type >= WM_T_I210) {
   9394 		/* I210 and I211 */
   9395 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9396 		    && (new_phytype != WMPHY_I210)
   9397 		    && (new_phytype != WMPHY_UNKNOWN))
   9398 			doubt_phytype = new_phytype;
   9399 		new_phytype = WMPHY_I210;
   9400 		new_readreg = wm_gmii_gs40g_readreg;
   9401 		new_writereg = wm_gmii_gs40g_writereg;
   9402 	} else if (sc->sc_type >= WM_T_82580) {
   9403 		/* 82580, I350 and I354 */
   9404 		new_readreg = wm_gmii_82580_readreg;
   9405 		new_writereg = wm_gmii_82580_writereg;
   9406 	} else if (sc->sc_type >= WM_T_82544) {
    9407 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9408 		new_readreg = wm_gmii_i82544_readreg;
   9409 		new_writereg = wm_gmii_i82544_writereg;
   9410 	} else {
   9411 		new_readreg = wm_gmii_i82543_readreg;
   9412 		new_writereg = wm_gmii_i82543_writereg;
   9413 	}
   9414 
   9415 	if (new_phytype == WMPHY_BM) {
   9416 		/* All BM use _bm_ */
   9417 		new_readreg = wm_gmii_bm_readreg;
   9418 		new_writereg = wm_gmii_bm_writereg;
   9419 	}
   9420 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9421 		/* All PCH* use _hv_ */
   9422 		new_readreg = wm_gmii_hv_readreg;
   9423 		new_writereg = wm_gmii_hv_writereg;
   9424 	}
   9425 
   9426 	/* Diag output */
   9427 	if (doubt_phytype != WMPHY_UNKNOWN)
   9428 		aprint_error_dev(dev, "Assumed new PHY type was "
   9429 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9430 		    new_phytype);
   9431 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9432 	    && (sc->sc_phytype != new_phytype))
    9433 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9434 		    "was incorrect. New PHY type = %u\n",
   9435 		    sc->sc_phytype, new_phytype);
   9436 
   9437 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9438 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9439 
   9440 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9441 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9442 		    "function was incorrect.\n");
   9443 
   9444 	/* Update now */
   9445 	sc->sc_phytype = new_phytype;
   9446 	mii->mii_readreg = new_readreg;
   9447 	mii->mii_writereg = new_writereg;
   9448 }
   9449 
   9450 /*
   9451  * wm_get_phy_id_82575:
   9452  *
   9453  * Return PHY ID. Return -1 if it failed.
   9454  */
   9455 static int
   9456 wm_get_phy_id_82575(struct wm_softc *sc)
   9457 {
   9458 	uint32_t reg;
   9459 	int phyid = -1;
   9460 
   9461 	/* XXX */
   9462 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9463 		return -1;
   9464 
   9465 	if (wm_sgmii_uses_mdio(sc)) {
   9466 		switch (sc->sc_type) {
   9467 		case WM_T_82575:
   9468 		case WM_T_82576:
   9469 			reg = CSR_READ(sc, WMREG_MDIC);
   9470 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9471 			break;
   9472 		case WM_T_82580:
   9473 		case WM_T_I350:
   9474 		case WM_T_I354:
   9475 		case WM_T_I210:
   9476 		case WM_T_I211:
   9477 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9478 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9479 			break;
   9480 		default:
   9481 			return -1;
   9482 		}
   9483 	}
   9484 
   9485 	return phyid;
   9486 }
   9487 
   9488 
   9489 /*
   9490  * wm_gmii_mediainit:
   9491  *
   9492  *	Initialize media for use on 1000BASE-T devices.
   9493  */
   9494 static void
   9495 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9496 {
   9497 	device_t dev = sc->sc_dev;
   9498 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9499 	struct mii_data *mii = &sc->sc_mii;
   9500 	uint32_t reg;
   9501 
   9502 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9503 		device_xname(sc->sc_dev), __func__));
   9504 
   9505 	/* We have GMII. */
   9506 	sc->sc_flags |= WM_F_HAS_MII;
   9507 
   9508 	if (sc->sc_type == WM_T_80003)
    9509 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9510 	else
   9511 		sc->sc_tipg = TIPG_1000T_DFLT;
   9512 
   9513 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9514 	if ((sc->sc_type == WM_T_82580)
   9515 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9516 	    || (sc->sc_type == WM_T_I211)) {
   9517 		reg = CSR_READ(sc, WMREG_PHPM);
   9518 		reg &= ~PHPM_GO_LINK_D;
   9519 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9520 	}
   9521 
   9522 	/*
   9523 	 * Let the chip set speed/duplex on its own based on
   9524 	 * signals from the PHY.
   9525 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9526 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9527 	 */
   9528 	sc->sc_ctrl |= CTRL_SLU;
   9529 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9530 
   9531 	/* Initialize our media structures and probe the GMII. */
   9532 	mii->mii_ifp = ifp;
   9533 
   9534 	mii->mii_statchg = wm_gmii_statchg;
   9535 
    9536 	/* Switch PHY control from SMBus to PCIe */
   9537 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9538 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9539 		wm_smbustopci(sc);
   9540 
   9541 	wm_gmii_reset(sc);
   9542 
   9543 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9544 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9545 	    wm_gmii_mediastatus);
   9546 
   9547 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9548 	    || (sc->sc_type == WM_T_82580)
   9549 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9550 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9551 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9552 			/* Attach only one port */
   9553 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9554 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9555 		} else {
   9556 			int i, id;
   9557 			uint32_t ctrl_ext;
   9558 
   9559 			id = wm_get_phy_id_82575(sc);
   9560 			if (id != -1) {
   9561 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9562 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9563 			}
   9564 			if ((id == -1)
   9565 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9566 				/* Power on sgmii phy if it is disabled */
   9567 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9568 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9569 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9570 				CSR_WRITE_FLUSH(sc);
   9571 				delay(300*1000); /* XXX too long */
   9572 
    9573 				/* Try PHY addresses 1 through 7 */
   9574 				for (i = 1; i < 8; i++)
   9575 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9576 					    0xffffffff, i, MII_OFFSET_ANY,
   9577 					    MIIF_DOPAUSE);
   9578 
   9579 				/* restore previous sfp cage power state */
   9580 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9581 			}
   9582 		}
   9583 	} else {
   9584 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9585 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9586 	}
   9587 
   9588 	/*
   9589 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9590 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9591 	 */
   9592 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9593 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9594 		wm_set_mdio_slow_mode_hv(sc);
   9595 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9596 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9597 	}
   9598 
   9599 	/*
   9600 	 * (For ICH8 variants)
   9601 	 * If PHY detection failed, use BM's r/w function and retry.
   9602 	 */
   9603 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9604 		/* if failed, retry with *_bm_* */
   9605 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9606 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9607 		    sc->sc_phytype);
   9608 		sc->sc_phytype = WMPHY_BM;
   9609 		mii->mii_readreg = wm_gmii_bm_readreg;
   9610 		mii->mii_writereg = wm_gmii_bm_writereg;
   9611 
   9612 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9613 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9614 	}
   9615 
   9616 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9617 		/* No PHY was found */
   9618 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9619 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9620 		sc->sc_phytype = WMPHY_NONE;
   9621 	} else {
   9622 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9623 
   9624 		/*
    9625 		 * A PHY was found. Check the PHY type again with a second
    9626 		 * call to wm_gmii_setup_phytype().
   9627 		 */
   9628 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9629 		    child->mii_mpd_model);
   9630 
   9631 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9632 	}
   9633 }
   9634 
   9635 /*
   9636  * wm_gmii_mediachange:	[ifmedia interface function]
   9637  *
   9638  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9639  */
   9640 static int
   9641 wm_gmii_mediachange(struct ifnet *ifp)
   9642 {
   9643 	struct wm_softc *sc = ifp->if_softc;
   9644 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9645 	int rc;
   9646 
   9647 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9648 		device_xname(sc->sc_dev), __func__));
   9649 	if ((ifp->if_flags & IFF_UP) == 0)
   9650 		return 0;
   9651 
   9652 	/* Disable D0 LPLU. */
   9653 	wm_lplu_d0_disable(sc);
   9654 
   9655 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9656 	sc->sc_ctrl |= CTRL_SLU;
   9657 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9658 	    || (sc->sc_type > WM_T_82543)) {
   9659 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9660 	} else {
   9661 		sc->sc_ctrl &= ~CTRL_ASDE;
   9662 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9663 		if (ife->ifm_media & IFM_FDX)
   9664 			sc->sc_ctrl |= CTRL_FD;
   9665 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9666 		case IFM_10_T:
   9667 			sc->sc_ctrl |= CTRL_SPEED_10;
   9668 			break;
   9669 		case IFM_100_TX:
   9670 			sc->sc_ctrl |= CTRL_SPEED_100;
   9671 			break;
   9672 		case IFM_1000_T:
   9673 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9674 			break;
   9675 		default:
   9676 			panic("wm_gmii_mediachange: bad media 0x%x",
   9677 			    ife->ifm_media);
   9678 		}
   9679 	}
   9680 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9681 	CSR_WRITE_FLUSH(sc);
   9682 	if (sc->sc_type <= WM_T_82543)
   9683 		wm_gmii_reset(sc);
   9684 
   9685 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9686 		return 0;
   9687 	return rc;
   9688 }
   9689 
   9690 /*
   9691  * wm_gmii_mediastatus:	[ifmedia interface function]
   9692  *
   9693  *	Get the current interface media status on a 1000BASE-T device.
   9694  */
   9695 static void
   9696 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9697 {
   9698 	struct wm_softc *sc = ifp->if_softc;
   9699 
   9700 	ether_mediastatus(ifp, ifmr);
   9701 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9702 	    | sc->sc_flowflags;
   9703 }
   9704 
   9705 #define	MDI_IO		CTRL_SWDPIN(2)
   9706 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9707 #define	MDI_CLK		CTRL_SWDPIN(3)
   9708 
   9709 static void
   9710 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9711 {
   9712 	uint32_t i, v;
   9713 
   9714 	v = CSR_READ(sc, WMREG_CTRL);
   9715 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9716 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9717 
   9718 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9719 		if (data & i)
   9720 			v |= MDI_IO;
   9721 		else
   9722 			v &= ~MDI_IO;
   9723 		CSR_WRITE(sc, WMREG_CTRL, v);
   9724 		CSR_WRITE_FLUSH(sc);
   9725 		delay(10);
   9726 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9727 		CSR_WRITE_FLUSH(sc);
   9728 		delay(10);
   9729 		CSR_WRITE(sc, WMREG_CTRL, v);
   9730 		CSR_WRITE_FLUSH(sc);
   9731 		delay(10);
   9732 	}
   9733 }
   9734 
   9735 static uint32_t
   9736 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9737 {
   9738 	uint32_t v, i, data = 0;
   9739 
   9740 	v = CSR_READ(sc, WMREG_CTRL);
   9741 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9742 	v |= CTRL_SWDPIO(3);
   9743 
   9744 	CSR_WRITE(sc, WMREG_CTRL, v);
   9745 	CSR_WRITE_FLUSH(sc);
   9746 	delay(10);
   9747 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9748 	CSR_WRITE_FLUSH(sc);
   9749 	delay(10);
   9750 	CSR_WRITE(sc, WMREG_CTRL, v);
   9751 	CSR_WRITE_FLUSH(sc);
   9752 	delay(10);
   9753 
   9754 	for (i = 0; i < 16; i++) {
   9755 		data <<= 1;
   9756 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9757 		CSR_WRITE_FLUSH(sc);
   9758 		delay(10);
   9759 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9760 			data |= 1;
   9761 		CSR_WRITE(sc, WMREG_CTRL, v);
   9762 		CSR_WRITE_FLUSH(sc);
   9763 		delay(10);
   9764 	}
   9765 
   9766 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9767 	CSR_WRITE_FLUSH(sc);
   9768 	delay(10);
   9769 	CSR_WRITE(sc, WMREG_CTRL, v);
   9770 	CSR_WRITE_FLUSH(sc);
   9771 	delay(10);
   9772 
   9773 	return data;
   9774 }
   9775 
   9776 #undef MDI_IO
   9777 #undef MDI_DIR
   9778 #undef MDI_CLK
   9779 
   9780 /*
   9781  * wm_gmii_i82543_readreg:	[mii interface function]
   9782  *
   9783  *	Read a PHY register on the GMII (i82543 version).
   9784  */
   9785 static int
   9786 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9787 {
   9788 	struct wm_softc *sc = device_private(dev);
   9789 	int rv;
   9790 
   9791 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9792 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9793 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9794 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9795 
   9796 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9797 	    device_xname(dev), phy, reg, rv));
   9798 
   9799 	return rv;
   9800 }
   9801 
   9802 /*
   9803  * wm_gmii_i82543_writereg:	[mii interface function]
   9804  *
   9805  *	Write a PHY register on the GMII (i82543 version).
   9806  */
   9807 static void
   9808 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9809 {
   9810 	struct wm_softc *sc = device_private(dev);
   9811 
   9812 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9813 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9814 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9815 	    (MII_COMMAND_START << 30), 32);
   9816 }
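
/*
 * Illustrative sketch (not compiled in): the helpers above bit-bang
 * IEEE 802.3 clause 22 MDIO frames on the software-definable pins.
 * The shifts in wm_gmii_i82543_readreg()/wm_gmii_i82543_writereg()
 * lay the frames out as follows, MSB first:
 *
 *	read command (14 bits): ST(01) OP(10) PHYAD(5) REGAD(5)
 *	write frame (32 bits):	ST(01) OP(01) PHYAD(5) REGAD(5)
 *				TA(10) DATA(16)
 *
 * For reads, the turnaround and the 16 data bits are then clocked in
 * by wm_i82543_mii_recvbits().
 */
#if 0
	/* Example: the 14-bit read command for PHY 1, register MII_BMSR */
	uint32_t rdcmd = MII_BMSR | (1 << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12);
#endif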
   9817 
   9818 /*
   9819  * wm_gmii_mdic_readreg:	[mii interface function]
   9820  *
   9821  *	Read a PHY register on the GMII.
   9822  */
   9823 static int
   9824 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9825 {
   9826 	struct wm_softc *sc = device_private(dev);
   9827 	uint32_t mdic = 0;
   9828 	int i, rv;
   9829 
   9830 	if (reg > MII_ADDRMASK) {
   9831 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9832 		    __func__, sc->sc_phytype, reg);
   9833 		reg &= MII_ADDRMASK;
   9834 	}
   9835 
   9836 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9837 	    MDIC_REGADD(reg));
   9838 
   9839 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9840 		mdic = CSR_READ(sc, WMREG_MDIC);
   9841 		if (mdic & MDIC_READY)
   9842 			break;
   9843 		delay(50);
   9844 	}
   9845 
   9846 	if ((mdic & MDIC_READY) == 0) {
   9847 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9848 		    device_xname(dev), phy, reg);
   9849 		rv = 0;
   9850 	} else if (mdic & MDIC_E) {
   9851 #if 0 /* This is normal if no PHY is present. */
   9852 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9853 		    device_xname(dev), phy, reg);
   9854 #endif
   9855 		rv = 0;
   9856 	} else {
   9857 		rv = MDIC_DATA(mdic);
   9858 		if (rv == 0xffff)
   9859 			rv = 0;
   9860 	}
   9861 
   9862 	return rv;
   9863 }
   9864 
   9865 /*
   9866  * wm_gmii_mdic_writereg:	[mii interface function]
   9867  *
   9868  *	Write a PHY register on the GMII.
   9869  */
   9870 static void
   9871 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9872 {
   9873 	struct wm_softc *sc = device_private(dev);
   9874 	uint32_t mdic = 0;
   9875 	int i;
   9876 
   9877 	if (reg > MII_ADDRMASK) {
   9878 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9879 		    __func__, sc->sc_phytype, reg);
   9880 		reg &= MII_ADDRMASK;
   9881 	}
   9882 
   9883 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9884 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9885 
   9886 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9887 		mdic = CSR_READ(sc, WMREG_MDIC);
   9888 		if (mdic & MDIC_READY)
   9889 			break;
   9890 		delay(50);
   9891 	}
   9892 
   9893 	if ((mdic & MDIC_READY) == 0)
   9894 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9895 		    device_xname(dev), phy, reg);
   9896 	else if (mdic & MDIC_E)
   9897 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9898 		    device_xname(dev), phy, reg);
   9899 }
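
/*
 * A minimal sketch of the MDIC access pattern shared by the two
 * functions above (illustrative only; the real loops bound the wait
 * at WM_GEN_POLL_TIMEOUT * 3 polls, 50us apart):
 */
#if 0
	uint32_t mdic;
	uint16_t val;

	CSR_WRITE(sc, WMREG_MDIC,
	    MDIC_OP_READ | MDIC_PHYADD(1) | MDIC_REGADD(MII_BMSR));
	do {
		delay(50);
		mdic = CSR_READ(sc, WMREG_MDIC);
	} while ((mdic & MDIC_READY) == 0);
	if ((mdic & MDIC_E) == 0)
		val = MDIC_DATA(mdic);
#endif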
   9900 
   9901 /*
   9902  * wm_gmii_i82544_readreg:	[mii interface function]
   9903  *
   9904  *	Read a PHY register on the GMII.
   9905  */
   9906 static int
   9907 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9908 {
   9909 	struct wm_softc *sc = device_private(dev);
   9910 	int rv;
   9911 
   9912 	if (sc->phy.acquire(sc)) {
   9913 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9914 		return 0;
   9915 	}
   9916 
   9917 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9918 		switch (sc->sc_phytype) {
   9919 		case WMPHY_IGP:
   9920 		case WMPHY_IGP_2:
   9921 		case WMPHY_IGP_3:
   9922 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9923 			break;
   9924 		default:
   9925 #ifdef WM_DEBUG
   9926 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9927 			    __func__, sc->sc_phytype, reg);
   9928 #endif
   9929 			break;
   9930 		}
   9931 	}
   9932 
   9933 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9934 	sc->phy.release(sc);
   9935 
   9936 	return rv;
   9937 }
   9938 
   9939 /*
   9940  * wm_gmii_i82544_writereg:	[mii interface function]
   9941  *
   9942  *	Write a PHY register on the GMII.
   9943  */
   9944 static void
   9945 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9946 {
   9947 	struct wm_softc *sc = device_private(dev);
   9948 
   9949 	if (sc->phy.acquire(sc)) {
   9950 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9951 		return;
   9952 	}
   9953 
   9954 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9955 		switch (sc->sc_phytype) {
   9956 		case WMPHY_IGP:
   9957 		case WMPHY_IGP_2:
   9958 		case WMPHY_IGP_3:
   9959 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9960 			break;
   9961 		default:
   9962 #ifdef WM_DEBUG
    9963 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9964 			    __func__, sc->sc_phytype, reg);
   9965 #endif
   9966 			break;
   9967 		}
   9968 	}
   9969 
   9970 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9971 	sc->phy.release(sc);
   9972 }
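
/*
 * For IGP PHYs, registers above BME1000_MAX_MULTI_PAGE_REG are paged:
 * in effect the register argument encodes (page << 5) | offset, the
 * whole value is written to MII_IGPHY_PAGE_SELECT, and the access
 * itself then uses only the low five bits (reg & MII_ADDRMASK), as in
 * the two functions above.
 */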
   9973 
   9974 /*
   9975  * wm_gmii_i80003_readreg:	[mii interface function]
   9976  *
   9977  *	Read a PHY register on the kumeran
   9978  * This could be handled by the PHY layer if we didn't have to lock the
    9979  * resource ...
   9980  */
   9981 static int
   9982 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9983 {
   9984 	struct wm_softc *sc = device_private(dev);
   9985 	int page_select, temp;
   9986 	int rv;
   9987 
   9988 	if (phy != 1) /* only one PHY on kumeran bus */
   9989 		return 0;
   9990 
   9991 	if (sc->phy.acquire(sc)) {
   9992 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9993 		return 0;
   9994 	}
   9995 
   9996 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   9997 		page_select = GG82563_PHY_PAGE_SELECT;
   9998 	else {
   9999 		/*
   10000 		 * Use Alternative Page Select register to access registers
   10001 		 * 30 and 31.
   10002 		 */
   10003 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10004 	}
   10005 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10006 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10007 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10008 		/*
    10009 		 * Wait another 200us to work around a bug in the ready bit
    10010 		 * of the MDIC register.
   10011 		 */
   10012 		delay(200);
   10013 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10014 			device_printf(dev, "%s failed\n", __func__);
   10015 			rv = 0; /* XXX */
   10016 			goto out;
   10017 		}
   10018 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10019 		delay(200);
   10020 	} else
   10021 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10022 
   10023 out:
   10024 	sc->phy.release(sc);
   10025 	return rv;
   10026 }
   10027 
   10028 /*
   10029  * wm_gmii_i80003_writereg:	[mii interface function]
   10030  *
   10031  *	Write a PHY register on the kumeran.
   10032  * This could be handled by the PHY layer if we didn't have to lock the
    10033  * resource ...
   10034  */
   10035 static void
   10036 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10037 {
   10038 	struct wm_softc *sc = device_private(dev);
   10039 	int page_select, temp;
   10040 
   10041 	if (phy != 1) /* only one PHY on kumeran bus */
   10042 		return;
   10043 
   10044 	if (sc->phy.acquire(sc)) {
   10045 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10046 		return;
   10047 	}
   10048 
   10049 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10050 		page_select = GG82563_PHY_PAGE_SELECT;
   10051 	else {
   10052 		/*
   10053 		 * Use Alternative Page Select register to access registers
   10054 		 * 30 and 31.
   10055 		 */
   10056 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10057 	}
   10058 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10059 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10060 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    10061 		 * Wait another 200us to work around a bug in the ready bit
    10062 		 * of the MDIC register.
   10063 		 * register.
   10064 		 */
   10065 		delay(200);
   10066 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10067 			device_printf(dev, "%s failed\n", __func__);
   10068 			goto out;
   10069 		}
   10070 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10071 		delay(200);
   10072 	} else
   10073 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10074 
   10075 out:
   10076 	sc->phy.release(sc);
   10077 }
   10078 
   10079 /*
   10080  * wm_gmii_bm_readreg:	[mii interface function]
   10081  *
    10082  *	Read a PHY register on the BM PHY.
    10083  * This could be handled by the PHY layer if we didn't have to lock the
    10084  * resource ...
   10085  */
   10086 static int
   10087 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10088 {
   10089 	struct wm_softc *sc = device_private(dev);
   10090 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10091 	uint16_t val;
   10092 	int rv;
   10093 
   10094 	if (sc->phy.acquire(sc)) {
   10095 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10096 		return 0;
   10097 	}
   10098 
   10099 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10100 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10101 		    || (reg == 31)) ? 1 : phy;
   10102 	/* Page 800 works differently than the rest so it has its own func */
   10103 	if (page == BM_WUC_PAGE) {
   10104 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10105 		rv = val;
   10106 		goto release;
   10107 	}
   10108 
   10109 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10110 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10111 		    && (sc->sc_type != WM_T_82583))
   10112 			wm_gmii_mdic_writereg(dev, phy,
   10113 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10114 		else
   10115 			wm_gmii_mdic_writereg(dev, phy,
   10116 			    BME1000_PHY_PAGE_SELECT, page);
   10117 	}
   10118 
   10119 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10120 
   10121 release:
   10122 	sc->phy.release(sc);
   10123 	return rv;
   10124 }
   10125 
   10126 /*
   10127  * wm_gmii_bm_writereg:	[mii interface function]
   10128  *
    10129  *	Write a PHY register on the BM PHY.
    10130  * This could be handled by the PHY layer if we didn't have to lock the
    10131  * resource ...
   10132  */
   10133 static void
   10134 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10135 {
   10136 	struct wm_softc *sc = device_private(dev);
   10137 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10138 
   10139 	if (sc->phy.acquire(sc)) {
   10140 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10141 		return;
   10142 	}
   10143 
   10144 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10145 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10146 		    || (reg == 31)) ? 1 : phy;
   10147 	/* Page 800 works differently than the rest so it has its own func */
   10148 	if (page == BM_WUC_PAGE) {
   10149 		uint16_t tmp;
   10150 
   10151 		tmp = val;
   10152 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10153 		goto release;
   10154 	}
   10155 
   10156 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10157 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10158 		    && (sc->sc_type != WM_T_82583))
   10159 			wm_gmii_mdic_writereg(dev, phy,
   10160 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10161 		else
   10162 			wm_gmii_mdic_writereg(dev, phy,
   10163 			    BME1000_PHY_PAGE_SELECT, page);
   10164 	}
   10165 
   10166 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10167 
   10168 release:
   10169 	sc->phy.release(sc);
   10170 }
   10171 
   10172 static void
   10173 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10174 {
   10175 	struct wm_softc *sc = device_private(dev);
   10176 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10177 	uint16_t wuce, reg;
   10178 
   10179 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10180 		device_xname(dev), __func__));
   10181 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10182 	if (sc->sc_type == WM_T_PCH) {
    10183 		/* XXX The e1000 driver does nothing here... why? */
   10184 	}
   10185 
   10186 	/*
   10187 	 * 1) Enable PHY wakeup register first.
   10188 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10189 	 */
   10190 
   10191 	/* Set page 769 */
   10192 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10193 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10194 
   10195 	/* Read WUCE and save it */
   10196 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10197 
   10198 	reg = wuce | BM_WUC_ENABLE_BIT;
   10199 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10200 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10201 
   10202 	/* Select page 800 */
   10203 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10204 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10205 
   10206 	/*
   10207 	 * 2) Access PHY wakeup register.
   10208 	 * See e1000_access_phy_wakeup_reg_bm.
   10209 	 */
   10210 
   10211 	/* Write page 800 */
   10212 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10213 
   10214 	if (rd)
   10215 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10216 	else
   10217 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10218 
   10219 	/*
   10220 	 * 3) Disable PHY wakeup register.
   10221 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10222 	 */
   10223 	/* Set page 769 */
   10224 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10225 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10226 
   10227 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10228 }
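
/*
 * Illustrative usage of the helper above (a sketch of what callers
 * such as wm_gmii_bm_readreg() do; dev and reg as in those callers):
 */
#if 0
	uint16_t val;

	wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);	/* rd = 1: read */
	wm_access_phy_wakeup_reg_bm(dev, reg, &val, 0);	/* rd = 0: write */
#endif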
   10229 
   10230 /*
   10231  * wm_gmii_hv_readreg:	[mii interface function]
   10232  *
    10233  *	Read a PHY register on the HV (PCH) PHY.
    10234  * This could be handled by the PHY layer if we didn't have to lock the
    10235  * resource ...
   10236  */
   10237 static int
   10238 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10239 {
   10240 	struct wm_softc *sc = device_private(dev);
   10241 	int rv;
   10242 
   10243 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10244 		device_xname(dev), __func__));
   10245 	if (sc->phy.acquire(sc)) {
   10246 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10247 		return 0;
   10248 	}
   10249 
   10250 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10251 	sc->phy.release(sc);
   10252 	return rv;
   10253 }
   10254 
   10255 static int
   10256 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10257 {
   10258 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10259 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10260 	uint16_t val;
   10261 	int rv;
   10262 
   10263 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10264 
   10265 	/* Page 800 works differently than the rest so it has its own func */
   10266 	if (page == BM_WUC_PAGE) {
   10267 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10268 		return val;
   10269 	}
   10270 
   10271 	/*
    10272 	 * Pages lower than 768 work differently from the rest; that
    10273 	 * access path is not implemented here.
   10274 	 */
   10275 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10276 		printf("gmii_hv_readreg!!!\n");
   10277 		return 0;
   10278 	}
   10279 
   10280 	/*
   10281 	 * XXX I21[789] documents say that the SMBus Address register is at
   10282 	 * PHY address 01, Page 0 (not 768), Register 26.
   10283 	 */
   10284 	if (page == HV_INTC_FC_PAGE_START)
   10285 		page = 0;
   10286 
   10287 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10288 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10289 		    page << BME1000_PAGE_SHIFT);
   10290 	}
   10291 
   10292 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10293 	return rv;
   10294 }
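
/*
 * HV register arguments pack (page, register) into a single integer;
 * BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() above undo that packing.  A
 * sketch of the encoding, assuming a matching BM_PHY_REG() constructor
 * macro (illustrative only):
 */
#if 0
	int reg;
	uint16_t page, regnum;

	reg = BM_PHY_REG(HV_INTC_FC_PAGE_START, 26);	/* page 768, reg 26 */
	page = BM_PHY_REG_PAGE(reg);			/* -> 768 */
	regnum = BM_PHY_REG_NUM(reg);			/* -> 26 */
#endif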
   10295 
   10296 /*
   10297  * wm_gmii_hv_writereg:	[mii interface function]
   10298  *
    10299  *	Write a PHY register on the HV (PCH) PHY.
    10300  * This could be handled by the PHY layer if we didn't have to lock the
    10301  * resource ...
   10302  */
   10303 static void
   10304 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10305 {
   10306 	struct wm_softc *sc = device_private(dev);
   10307 
   10308 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10309 		device_xname(dev), __func__));
   10310 
   10311 	if (sc->phy.acquire(sc)) {
   10312 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10313 		return;
   10314 	}
   10315 
   10316 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10317 	sc->phy.release(sc);
   10318 }
   10319 
   10320 static void
   10321 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10322 {
   10323 	struct wm_softc *sc = device_private(dev);
   10324 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10325 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10326 
   10327 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10328 
   10329 	/* Page 800 works differently than the rest so it has its own func */
   10330 	if (page == BM_WUC_PAGE) {
   10331 		uint16_t tmp;
   10332 
   10333 		tmp = val;
   10334 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10335 		return;
   10336 	}
   10337 
   10338 	/*
    10339 	 * Pages lower than 768 work differently from the rest; that
    10340 	 * access path is not implemented here.
   10341 	 */
   10342 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10343 		printf("gmii_hv_writereg!!!\n");
   10344 		return;
   10345 	}
   10346 
   10347 	{
   10348 		/*
   10349 		 * XXX I21[789] documents say that the SMBus Address register
   10350 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10351 		 */
   10352 		if (page == HV_INTC_FC_PAGE_START)
   10353 			page = 0;
   10354 
   10355 		/*
   10356 		 * XXX Workaround MDIO accesses being disabled after entering
   10357 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10358 		 * register is set)
   10359 		 */
   10360 		if (sc->sc_phytype == WMPHY_82578) {
   10361 			struct mii_softc *child;
   10362 
   10363 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10364 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10365 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10366 			    && ((val & (1 << 11)) != 0)) {
   10367 				printf("XXX need workaround\n");
   10368 			}
   10369 		}
   10370 
   10371 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10372 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10373 			    page << BME1000_PAGE_SHIFT);
   10374 		}
   10375 	}
   10376 
   10377 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10378 }
   10379 
   10380 /*
   10381  * wm_gmii_82580_readreg:	[mii interface function]
   10382  *
   10383  *	Read a PHY register on the 82580 and I350.
   10384  * This could be handled by the PHY layer if we didn't have to lock the
    10385  * resource ...
   10386  */
   10387 static int
   10388 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10389 {
   10390 	struct wm_softc *sc = device_private(dev);
   10391 	int rv;
   10392 
   10393 	if (sc->phy.acquire(sc) != 0) {
   10394 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10395 		return 0;
   10396 	}
   10397 
   10398 #ifdef DIAGNOSTIC
   10399 	if (reg > MII_ADDRMASK) {
   10400 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10401 		    __func__, sc->sc_phytype, reg);
   10402 		reg &= MII_ADDRMASK;
   10403 	}
   10404 #endif
   10405 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10406 
   10407 	sc->phy.release(sc);
   10408 	return rv;
   10409 }
   10410 
   10411 /*
   10412  * wm_gmii_82580_writereg:	[mii interface function]
   10413  *
   10414  *	Write a PHY register on the 82580 and I350.
   10415  * This could be handled by the PHY layer if we didn't have to lock the
    10416  * resource ...
   10417  */
   10418 static void
   10419 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10420 {
   10421 	struct wm_softc *sc = device_private(dev);
   10422 
   10423 	if (sc->phy.acquire(sc) != 0) {
   10424 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10425 		return;
   10426 	}
   10427 
   10428 #ifdef DIAGNOSTIC
   10429 	if (reg > MII_ADDRMASK) {
   10430 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10431 		    __func__, sc->sc_phytype, reg);
   10432 		reg &= MII_ADDRMASK;
   10433 	}
   10434 #endif
   10435 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10436 
   10437 	sc->phy.release(sc);
   10438 }
   10439 
   10440 /*
   10441  * wm_gmii_gs40g_readreg:	[mii interface function]
   10442  *
    10443  *	Read a PHY register on the I210 and I211.
    10444  * This could be handled by the PHY layer if we didn't have to lock the
    10445  * resource ...
   10446  */
   10447 static int
   10448 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10449 {
   10450 	struct wm_softc *sc = device_private(dev);
   10451 	int page, offset;
   10452 	int rv;
   10453 
   10454 	/* Acquire semaphore */
   10455 	if (sc->phy.acquire(sc)) {
   10456 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10457 		return 0;
   10458 	}
   10459 
   10460 	/* Page select */
   10461 	page = reg >> GS40G_PAGE_SHIFT;
   10462 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10463 
   10464 	/* Read reg */
   10465 	offset = reg & GS40G_OFFSET_MASK;
   10466 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10467 
   10468 	sc->phy.release(sc);
   10469 	return rv;
   10470 }
   10471 
   10472 /*
   10473  * wm_gmii_gs40g_writereg:	[mii interface function]
   10474  *
   10475  *	Write a PHY register on the I210 and I211.
    10476  * resource ...
   10477  * ressource ...
   10478  */
   10479 static void
   10480 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10481 {
   10482 	struct wm_softc *sc = device_private(dev);
   10483 	int page, offset;
   10484 
   10485 	/* Acquire semaphore */
   10486 	if (sc->phy.acquire(sc)) {
   10487 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10488 		return;
   10489 	}
   10490 
   10491 	/* Page select */
   10492 	page = reg >> GS40G_PAGE_SHIFT;
   10493 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10494 
   10495 	/* Write reg */
   10496 	offset = reg & GS40G_OFFSET_MASK;
   10497 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10498 
   10499 	/* Release semaphore */
   10500 	sc->phy.release(sc);
   10501 }
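
/*
 * Worked example for the GS40G split above (illustrative): a register
 * argument of (2 << GS40G_PAGE_SHIFT) | 21 selects page 2, and the
 * in-page offset (reg & GS40G_OFFSET_MASK) is 21.
 */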
   10502 
   10503 /*
   10504  * wm_gmii_statchg:	[mii interface function]
   10505  *
   10506  *	Callback from MII layer when media changes.
   10507  */
   10508 static void
   10509 wm_gmii_statchg(struct ifnet *ifp)
   10510 {
   10511 	struct wm_softc *sc = ifp->if_softc;
   10512 	struct mii_data *mii = &sc->sc_mii;
   10513 
   10514 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10515 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10516 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10517 
   10518 	/*
   10519 	 * Get flow control negotiation result.
   10520 	 */
   10521 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10522 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10523 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10524 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10525 	}
   10526 
   10527 	if (sc->sc_flowflags & IFM_FLOW) {
   10528 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10529 			sc->sc_ctrl |= CTRL_TFCE;
   10530 			sc->sc_fcrtl |= FCRTL_XONE;
   10531 		}
   10532 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10533 			sc->sc_ctrl |= CTRL_RFCE;
   10534 	}
   10535 
   10536 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10537 		DPRINTF(WM_DEBUG_LINK,
   10538 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10539 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10540 	} else {
   10541 		DPRINTF(WM_DEBUG_LINK,
   10542 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10543 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10544 	}
   10545 
   10546 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10547 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10548 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10549 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10550 	if (sc->sc_type == WM_T_80003) {
   10551 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10552 		case IFM_1000_T:
   10553 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10554 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10555 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10556 			break;
   10557 		default:
   10558 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10559 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10560 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10561 			break;
   10562 		}
   10563 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10564 	}
   10565 }
   10566 
   10567 /* kumeran related (80003, ICH* and PCH*) */
   10568 
   10569 /*
   10570  * wm_kmrn_readreg:
   10571  *
   10572  *	Read a kumeran register
   10573  */
   10574 static int
   10575 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10576 {
   10577 	int rv;
   10578 
   10579 	if (sc->sc_type == WM_T_80003)
   10580 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10581 	else
   10582 		rv = sc->phy.acquire(sc);
   10583 	if (rv != 0) {
   10584 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10585 		    __func__);
   10586 		return rv;
   10587 	}
   10588 
   10589 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10590 
   10591 	if (sc->sc_type == WM_T_80003)
   10592 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10593 	else
   10594 		sc->phy.release(sc);
   10595 
   10596 	return rv;
   10597 }
   10598 
   10599 static int
   10600 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10601 {
   10602 
   10603 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10604 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10605 	    KUMCTRLSTA_REN);
   10606 	CSR_WRITE_FLUSH(sc);
   10607 	delay(2);
   10608 
   10609 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10610 
   10611 	return 0;
   10612 }
   10613 
   10614 /*
   10615  * wm_kmrn_writereg:
   10616  *
   10617  *	Write a kumeran register
   10618  */
   10619 static int
   10620 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10621 {
   10622 	int rv;
   10623 
   10624 	if (sc->sc_type == WM_T_80003)
   10625 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10626 	else
   10627 		rv = sc->phy.acquire(sc);
   10628 	if (rv != 0) {
   10629 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10630 		    __func__);
   10631 		return rv;
   10632 	}
   10633 
   10634 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10635 
   10636 	if (sc->sc_type == WM_T_80003)
   10637 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10638 	else
   10639 		sc->phy.release(sc);
   10640 
   10641 	return rv;
   10642 }
   10643 
   10644 static int
   10645 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10646 {
   10647 
   10648 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10649 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10650 
   10651 	return 0;
   10652 }
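
/*
 * A sketch of a locked kumeran read-modify-write using the two helpers
 * above (illustrative only; the caller must already hold the semaphore,
 * as wm_kmrn_readreg()/wm_kmrn_writereg() arrange):
 */
#if 0
	uint16_t val;

	wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &val);
	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, val);
#endif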
   10653 
   10654 /* SGMII related */
   10655 
   10656 /*
   10657  * wm_sgmii_uses_mdio
   10658  *
   10659  * Check whether the transaction is to the internal PHY or the external
   10660  * MDIO interface. Return true if it's MDIO.
   10661  */
   10662 static bool
   10663 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10664 {
   10665 	uint32_t reg;
   10666 	bool ismdio = false;
   10667 
   10668 	switch (sc->sc_type) {
   10669 	case WM_T_82575:
   10670 	case WM_T_82576:
   10671 		reg = CSR_READ(sc, WMREG_MDIC);
   10672 		ismdio = ((reg & MDIC_DEST) != 0);
   10673 		break;
   10674 	case WM_T_82580:
   10675 	case WM_T_I350:
   10676 	case WM_T_I354:
   10677 	case WM_T_I210:
   10678 	case WM_T_I211:
   10679 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10680 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10681 		break;
   10682 	default:
   10683 		break;
   10684 	}
   10685 
   10686 	return ismdio;
   10687 }
   10688 
   10689 /*
   10690  * wm_sgmii_readreg:	[mii interface function]
   10691  *
   10692  *	Read a PHY register on the SGMII
    10693  * resource ...
   10694  * ressource ...
   10695  */
   10696 static int
   10697 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10698 {
   10699 	struct wm_softc *sc = device_private(dev);
   10700 	uint32_t i2ccmd;
   10701 	int i, rv;
   10702 
   10703 	if (sc->phy.acquire(sc)) {
   10704 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10705 		return 0;
   10706 	}
   10707 
   10708 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10709 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10710 	    | I2CCMD_OPCODE_READ;
   10711 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10712 
   10713 	/* Poll the ready bit */
   10714 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10715 		delay(50);
   10716 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10717 		if (i2ccmd & I2CCMD_READY)
   10718 			break;
   10719 	}
   10720 	if ((i2ccmd & I2CCMD_READY) == 0)
   10721 		device_printf(dev, "I2CCMD Read did not complete\n");
   10722 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10723 		device_printf(dev, "I2CCMD Error bit set\n");
   10724 
	/* Swap the data bytes read back from the I2C interface */
    10725 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10726 
   10727 	sc->phy.release(sc);
   10728 	return rv;
   10729 }
   10730 
   10731 /*
   10732  * wm_sgmii_writereg:	[mii interface function]
   10733  *
   10734  *	Write a PHY register on the SGMII.
    10735  * resource ...
   10736  * ressource ...
   10737  */
   10738 static void
   10739 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10740 {
   10741 	struct wm_softc *sc = device_private(dev);
   10742 	uint32_t i2ccmd;
   10743 	int i;
   10744 	int val_swapped;
   10745 
   10746 	if (sc->phy.acquire(sc) != 0) {
   10747 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10748 		return;
   10749 	}
   10750 	/* Swap the data bytes for the I2C interface */
   10751 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10752 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10753 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10754 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10755 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10756 
   10757 	/* Poll the ready bit */
   10758 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10759 		delay(50);
   10760 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10761 		if (i2ccmd & I2CCMD_READY)
   10762 			break;
   10763 	}
   10764 	if ((i2ccmd & I2CCMD_READY) == 0)
   10765 		device_printf(dev, "I2CCMD Write did not complete\n");
   10766 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10767 		device_printf(dev, "I2CCMD Error bit set\n");
   10768 
   10769 	sc->phy.release(sc);
   10770 }
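
/*
 * Worked example of the byte swap used by both SGMII helpers above
 * (the I2CCMD data field carries the two data bytes in the opposite
 * order from the PHY register value): a register value of 0x1234 is
 * written as 0x3412, and a raw read of 0x3412 yields 0x1234.
 */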
   10771 
   10772 /* TBI related */
   10773 
   10774 /*
   10775  * wm_tbi_mediainit:
   10776  *
   10777  *	Initialize media for use on 1000BASE-X devices.
   10778  */
   10779 static void
   10780 wm_tbi_mediainit(struct wm_softc *sc)
   10781 {
   10782 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10783 	const char *sep = "";
   10784 
   10785 	if (sc->sc_type < WM_T_82543)
   10786 		sc->sc_tipg = TIPG_WM_DFLT;
   10787 	else
   10788 		sc->sc_tipg = TIPG_LG_DFLT;
   10789 
   10790 	sc->sc_tbi_serdes_anegticks = 5;
   10791 
   10792 	/* Initialize our media structures */
   10793 	sc->sc_mii.mii_ifp = ifp;
   10794 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10795 
   10796 	if ((sc->sc_type >= WM_T_82575)
   10797 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10798 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10799 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10800 	else
   10801 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10802 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10803 
   10804 	/*
   10805 	 * SWD Pins:
   10806 	 *
   10807 	 *	0 = Link LED (output)
   10808 	 *	1 = Loss Of Signal (input)
   10809 	 */
   10810 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10811 
   10812 	/* XXX Perhaps this is only for TBI */
   10813 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10814 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10815 
   10816 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10817 		sc->sc_ctrl &= ~CTRL_LRST;
   10818 
   10819 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10820 
   10821 #define	ADD(ss, mm, dd)							\
   10822 do {									\
   10823 	aprint_normal("%s%s", sep, ss);					\
   10824 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10825 	sep = ", ";							\
   10826 } while (/*CONSTCOND*/0)
   10827 
   10828 	aprint_normal_dev(sc->sc_dev, "");
   10829 
   10830 	if (sc->sc_type == WM_T_I354) {
   10831 		uint32_t status;
   10832 
   10833 		status = CSR_READ(sc, WMREG_STATUS);
   10834 		if (((status & STATUS_2P5_SKU) != 0)
   10835 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    10836 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   10837 		} else
    10838 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10839 	} else if (sc->sc_type == WM_T_82545) {
   10840 		/* Only 82545 is LX (XXX except SFP) */
   10841 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10842 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10843 	} else {
   10844 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10845 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10846 	}
   10847 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10848 	aprint_normal("\n");
   10849 
   10850 #undef ADD
   10851 
   10852 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10853 }
   10854 
   10855 /*
   10856  * wm_tbi_mediachange:	[ifmedia interface function]
   10857  *
   10858  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10859  */
   10860 static int
   10861 wm_tbi_mediachange(struct ifnet *ifp)
   10862 {
   10863 	struct wm_softc *sc = ifp->if_softc;
   10864 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10865 	uint32_t status;
   10866 	int i;
   10867 
   10868 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10869 		/* XXX need some work for >= 82571 and < 82575 */
   10870 		if (sc->sc_type < WM_T_82575)
   10871 			return 0;
   10872 	}
   10873 
   10874 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10875 	    || (sc->sc_type >= WM_T_82575))
   10876 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10877 
   10878 	sc->sc_ctrl &= ~CTRL_LRST;
   10879 	sc->sc_txcw = TXCW_ANE;
   10880 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10881 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10882 	else if (ife->ifm_media & IFM_FDX)
   10883 		sc->sc_txcw |= TXCW_FD;
   10884 	else
   10885 		sc->sc_txcw |= TXCW_HD;
   10886 
   10887 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10888 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10889 
   10890 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10891 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10892 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10893 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10894 	CSR_WRITE_FLUSH(sc);
   10895 	delay(1000);
   10896 
   10897 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10898 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10899 
   10900 	/*
   10901 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   10902 	 * optics detect a signal, 0 if they don't.
   10903 	 */
   10904 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10905 		/* Have signal; wait for the link to come up. */
   10906 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10907 			delay(10000);
   10908 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10909 				break;
   10910 		}
   10911 
   10912 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10913 			    device_xname(sc->sc_dev),i));
   10914 
   10915 		status = CSR_READ(sc, WMREG_STATUS);
   10916 		DPRINTF(WM_DEBUG_LINK,
   10917 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10918 			device_xname(sc->sc_dev),status, STATUS_LU));
   10919 		if (status & STATUS_LU) {
   10920 			/* Link is up. */
   10921 			DPRINTF(WM_DEBUG_LINK,
   10922 			    ("%s: LINK: set media -> link up %s\n",
   10923 			    device_xname(sc->sc_dev),
   10924 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10925 
   10926 			/*
    10927 			 * NOTE: the hardware updates CTRL's TFCE and RFCE
    10928 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   10929 			 */
   10930 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10931 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10932 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10933 			if (status & STATUS_FD)
   10934 				sc->sc_tctl |=
   10935 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10936 			else
   10937 				sc->sc_tctl |=
   10938 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10939 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10940 				sc->sc_fcrtl |= FCRTL_XONE;
   10941 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10942 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10943 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10944 				      sc->sc_fcrtl);
   10945 			sc->sc_tbi_linkup = 1;
   10946 		} else {
   10947 			if (i == WM_LINKUP_TIMEOUT)
   10948 				wm_check_for_link(sc);
   10949 			/* Link is down. */
   10950 			DPRINTF(WM_DEBUG_LINK,
   10951 			    ("%s: LINK: set media -> link down\n",
   10952 			    device_xname(sc->sc_dev)));
   10953 			sc->sc_tbi_linkup = 0;
   10954 		}
   10955 	} else {
   10956 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10957 		    device_xname(sc->sc_dev)));
   10958 		sc->sc_tbi_linkup = 0;
   10959 	}
   10960 
   10961 	wm_tbi_serdes_set_linkled(sc);
   10962 
   10963 	return 0;
   10964 }
   10965 
   10966 /*
   10967  * wm_tbi_mediastatus:	[ifmedia interface function]
   10968  *
   10969  *	Get the current interface media status on a 1000BASE-X device.
   10970  */
   10971 static void
   10972 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10973 {
   10974 	struct wm_softc *sc = ifp->if_softc;
   10975 	uint32_t ctrl, status;
   10976 
   10977 	ifmr->ifm_status = IFM_AVALID;
   10978 	ifmr->ifm_active = IFM_ETHER;
   10979 
   10980 	status = CSR_READ(sc, WMREG_STATUS);
   10981 	if ((status & STATUS_LU) == 0) {
   10982 		ifmr->ifm_active |= IFM_NONE;
   10983 		return;
   10984 	}
   10985 
   10986 	ifmr->ifm_status |= IFM_ACTIVE;
   10987 	/* Only 82545 is LX */
   10988 	if (sc->sc_type == WM_T_82545)
   10989 		ifmr->ifm_active |= IFM_1000_LX;
   10990 	else
   10991 		ifmr->ifm_active |= IFM_1000_SX;
   10992 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10993 		ifmr->ifm_active |= IFM_FDX;
   10994 	else
   10995 		ifmr->ifm_active |= IFM_HDX;
   10996 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10997 	if (ctrl & CTRL_RFCE)
   10998 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10999 	if (ctrl & CTRL_TFCE)
   11000 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11001 }
   11002 
   11003 /* XXX TBI only */
   11004 static int
   11005 wm_check_for_link(struct wm_softc *sc)
   11006 {
   11007 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11008 	uint32_t rxcw;
   11009 	uint32_t ctrl;
   11010 	uint32_t status;
   11011 	uint32_t sig;
   11012 
   11013 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11014 		/* XXX need some work for >= 82571 */
   11015 		if (sc->sc_type >= WM_T_82571) {
   11016 			sc->sc_tbi_linkup = 1;
   11017 			return 0;
   11018 		}
   11019 	}
   11020 
   11021 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11022 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11023 	status = CSR_READ(sc, WMREG_STATUS);
   11024 
   11025 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11026 
   11027 	DPRINTF(WM_DEBUG_LINK,
   11028 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11029 		device_xname(sc->sc_dev), __func__,
   11030 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11031 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11032 
   11033 	/*
   11034 	 * SWDPIN   LU RXCW
   11035 	 *      0    0    0
   11036 	 *      0    0    1	(should not happen)
   11037 	 *      0    1    0	(should not happen)
   11038 	 *      0    1    1	(should not happen)
   11039 	 *      1    0    0	Disable autonego and force linkup
   11040 	 *      1    0    1	got /C/ but not linkup yet
   11041 	 *      1    1    0	(linkup)
   11042 	 *      1    1    1	If IFM_AUTO, back to autonego
   11043 	 *
   11044 	 */
   11045 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11046 	    && ((status & STATUS_LU) == 0)
   11047 	    && ((rxcw & RXCW_C) == 0)) {
   11048 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11049 			__func__));
   11050 		sc->sc_tbi_linkup = 0;
   11051 		/* Disable auto-negotiation in the TXCW register */
   11052 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11053 
   11054 		/*
   11055 		 * Force link-up and also force full-duplex.
   11056 		 *
    11057 		 * NOTE: the hardware updated CTRL's TFCE and RFCE
    11058 		 * automatically, so sc->sc_ctrl is rebuilt from that value.
   11059 		 */
   11060 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11061 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11062 	} else if (((status & STATUS_LU) != 0)
   11063 	    && ((rxcw & RXCW_C) != 0)
   11064 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11065 		sc->sc_tbi_linkup = 1;
   11066 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11067 			__func__));
   11068 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11069 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11070 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11071 	    && ((rxcw & RXCW_C) != 0)) {
   11072 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11073 	} else {
   11074 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11075 			status));
   11076 	}
   11077 
   11078 	return 0;
   11079 }
   11080 
   11081 /*
   11082  * wm_tbi_tick:
   11083  *
   11084  *	Check the link on TBI devices.
   11085  *	This function acts as mii_tick().
   11086  */
   11087 static void
   11088 wm_tbi_tick(struct wm_softc *sc)
   11089 {
   11090 	struct mii_data *mii = &sc->sc_mii;
   11091 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11092 	uint32_t status;
   11093 
   11094 	KASSERT(WM_CORE_LOCKED(sc));
   11095 
   11096 	status = CSR_READ(sc, WMREG_STATUS);
   11097 
   11098 	/* XXX is this needed? */
   11099 	(void)CSR_READ(sc, WMREG_RXCW);
   11100 	(void)CSR_READ(sc, WMREG_CTRL);
   11101 
   11102 	/* set link status */
   11103 	if ((status & STATUS_LU) == 0) {
   11104 		DPRINTF(WM_DEBUG_LINK,
   11105 		    ("%s: LINK: checklink -> down\n",
   11106 			device_xname(sc->sc_dev)));
   11107 		sc->sc_tbi_linkup = 0;
   11108 	} else if (sc->sc_tbi_linkup == 0) {
   11109 		DPRINTF(WM_DEBUG_LINK,
   11110 		    ("%s: LINK: checklink -> up %s\n",
   11111 			device_xname(sc->sc_dev),
   11112 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11113 		sc->sc_tbi_linkup = 1;
   11114 		sc->sc_tbi_serdes_ticks = 0;
   11115 	}
   11116 
   11117 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11118 		goto setled;
   11119 
   11120 	if ((status & STATUS_LU) == 0) {
   11121 		sc->sc_tbi_linkup = 0;
   11122 		/* If the timer expired, retry autonegotiation */
   11123 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11124 		    && (++sc->sc_tbi_serdes_ticks
   11125 			>= sc->sc_tbi_serdes_anegticks)) {
   11126 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11127 			sc->sc_tbi_serdes_ticks = 0;
   11128 			/*
   11129 			 * Reset the link, and let autonegotiation do
   11130 			 * its thing
   11131 			 */
   11132 			sc->sc_ctrl |= CTRL_LRST;
   11133 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11134 			CSR_WRITE_FLUSH(sc);
   11135 			delay(1000);
   11136 			sc->sc_ctrl &= ~CTRL_LRST;
   11137 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11138 			CSR_WRITE_FLUSH(sc);
   11139 			delay(1000);
   11140 			CSR_WRITE(sc, WMREG_TXCW,
   11141 			    sc->sc_txcw & ~TXCW_ANE);
   11142 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11143 		}
   11144 	}
   11145 
   11146 setled:
   11147 	wm_tbi_serdes_set_linkled(sc);
   11148 }
   11149 
   11150 /* SERDES related */
   11151 static void
   11152 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11153 {
   11154 	uint32_t reg;
   11155 
   11156 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11157 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11158 		return;
   11159 
   11160 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11161 	reg |= PCS_CFG_PCS_EN;
   11162 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11163 
   11164 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11165 	reg &= ~CTRL_EXT_SWDPIN(3);
   11166 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11167 	CSR_WRITE_FLUSH(sc);
   11168 }
   11169 
   11170 static int
   11171 wm_serdes_mediachange(struct ifnet *ifp)
   11172 {
   11173 	struct wm_softc *sc = ifp->if_softc;
   11174 	bool pcs_autoneg = true; /* XXX */
   11175 	uint32_t ctrl_ext, pcs_lctl, reg;
   11176 
   11177 	/* XXX Currently, this function is not called on 8257[12] */
   11178 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11179 	    || (sc->sc_type >= WM_T_82575))
   11180 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11181 
   11182 	wm_serdes_power_up_link_82575(sc);
   11183 
   11184 	sc->sc_ctrl |= CTRL_SLU;
   11185 
   11186 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11187 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11188 
   11189 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11190 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11191 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11192 	case CTRL_EXT_LINK_MODE_SGMII:
   11193 		pcs_autoneg = true;
   11194 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11195 		break;
   11196 	case CTRL_EXT_LINK_MODE_1000KX:
   11197 		pcs_autoneg = false;
   11198 		/* FALLTHROUGH */
   11199 	default:
   11200 		if ((sc->sc_type == WM_T_82575)
   11201 		    || (sc->sc_type == WM_T_82576)) {
   11202 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11203 				pcs_autoneg = false;
   11204 		}
   11205 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11206 		    | CTRL_FRCFDX;
   11207 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11208 	}
   11209 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11210 
   11211 	if (pcs_autoneg) {
   11212 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11213 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11214 
   11215 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11216 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11217 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11218 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11219 	} else
   11220 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11221 
   11222 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11223 
   11225 	return 0;
   11226 }
   11227 
   11228 static void
   11229 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11230 {
   11231 	struct wm_softc *sc = ifp->if_softc;
   11232 	struct mii_data *mii = &sc->sc_mii;
   11233 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11234 	uint32_t pcs_adv, pcs_lpab, reg;
   11235 
   11236 	ifmr->ifm_status = IFM_AVALID;
   11237 	ifmr->ifm_active = IFM_ETHER;
   11238 
   11239 	/* Check PCS */
   11240 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11241 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11242 		ifmr->ifm_active |= IFM_NONE;
   11243 		sc->sc_tbi_linkup = 0;
   11244 		goto setled;
   11245 	}
   11246 
   11247 	sc->sc_tbi_linkup = 1;
   11248 	ifmr->ifm_status |= IFM_ACTIVE;
   11249 	if (sc->sc_type == WM_T_I354) {
   11250 		uint32_t status;
   11251 
   11252 		status = CSR_READ(sc, WMREG_STATUS);
   11253 		if (((status & STATUS_2P5_SKU) != 0)
   11254 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11255 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11256 		} else
   11257 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11258 	} else {
   11259 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11260 		case PCS_LSTS_SPEED_10:
   11261 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11262 			break;
   11263 		case PCS_LSTS_SPEED_100:
   11264 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11265 			break;
   11266 		case PCS_LSTS_SPEED_1000:
   11267 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11268 			break;
   11269 		default:
   11270 			device_printf(sc->sc_dev, "Unknown speed\n");
   11271 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11272 			break;
   11273 		}
   11274 	}
   11275 	if ((reg & PCS_LSTS_FDX) != 0)
   11276 		ifmr->ifm_active |= IFM_FDX;
   11277 	else
   11278 		ifmr->ifm_active |= IFM_HDX;
   11279 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11280 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11281 		/* Check flow */
   11282 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11283 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11284 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11285 			goto setled;
   11286 		}
   11287 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11288 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11289 		DPRINTF(WM_DEBUG_LINK,
   11290 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11291 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11292 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11293 			mii->mii_media_active |= IFM_FLOW
   11294 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11295 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11296 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11297 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11298 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11299 			mii->mii_media_active |= IFM_FLOW
   11300 			    | IFM_ETH_TXPAUSE;
   11301 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11302 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11303 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11304 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11305 			mii->mii_media_active |= IFM_FLOW
   11306 			    | IFM_ETH_RXPAUSE;
   11307 		}
   11308 	}
   11309 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11310 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11311 setled:
   11312 	wm_tbi_serdes_set_linkled(sc);
   11313 }
   11314 
   11315 /*
   11316  * wm_serdes_tick:
   11317  *
   11318  *	Check the link on serdes devices.
   11319  */
   11320 static void
   11321 wm_serdes_tick(struct wm_softc *sc)
   11322 {
   11323 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11324 	struct mii_data *mii = &sc->sc_mii;
   11325 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11326 	uint32_t reg;
   11327 
   11328 	KASSERT(WM_CORE_LOCKED(sc));
   11329 
   11330 	mii->mii_media_status = IFM_AVALID;
   11331 	mii->mii_media_active = IFM_ETHER;
   11332 
   11333 	/* Check PCS */
   11334 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11335 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11336 		mii->mii_media_status |= IFM_ACTIVE;
   11337 		sc->sc_tbi_linkup = 1;
   11338 		sc->sc_tbi_serdes_ticks = 0;
   11339 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11340 		if ((reg & PCS_LSTS_FDX) != 0)
   11341 			mii->mii_media_active |= IFM_FDX;
   11342 		else
   11343 			mii->mii_media_active |= IFM_HDX;
   11344 	} else {
   11345 		mii->mii_media_status |= IFM_NONE;
   11346 		sc->sc_tbi_linkup = 0;
   11347 		/* If the timer expired, retry autonegotiation */
   11348 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11349 		    && (++sc->sc_tbi_serdes_ticks
   11350 			>= sc->sc_tbi_serdes_anegticks)) {
   11351 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11352 			sc->sc_tbi_serdes_ticks = 0;
   11353 			/* XXX */
   11354 			wm_serdes_mediachange(ifp);
   11355 		}
   11356 	}
   11357 
   11358 	wm_tbi_serdes_set_linkled(sc);
   11359 }
   11360 
   11361 /* SFP related */
   11362 
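/*
 * wm_sfp_read_data_byte:
 *
 *	Read a single byte from the SFP module's EEPROM through the
 *	I2CCMD register.
 */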
   11363 static int
   11364 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11365 {
   11366 	uint32_t i2ccmd;
   11367 	int i;
   11368 
   11369 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11370 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11371 
   11372 	/* Poll the ready bit */
   11373 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11374 		delay(50);
   11375 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11376 		if (i2ccmd & I2CCMD_READY)
   11377 			break;
   11378 	}
   11379 	if ((i2ccmd & I2CCMD_READY) == 0)
   11380 		return -1;
   11381 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11382 		return -1;
   11383 
   11384 	*data = i2ccmd & 0x00ff;
   11385 
   11386 	return 0;
   11387 }
   11388 
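/*
 * wm_sfp_get_media_type:
 *
 *	Identify the SFP module type from its EEPROM and return the
 *	corresponding WM_MEDIATYPE_* value.
 */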
   11389 static uint32_t
   11390 wm_sfp_get_media_type(struct wm_softc *sc)
   11391 {
   11392 	uint32_t ctrl_ext;
   11393 	uint8_t val = 0;
   11394 	int timeout = 3;
   11395 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11396 	int rv = -1;
   11397 
   11398 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11399 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11400 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11401 	CSR_WRITE_FLUSH(sc);
   11402 
   11403 	/* Read SFP module data */
   11404 	while (timeout) {
   11405 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11406 		if (rv == 0)
   11407 			break;
   11408 		delay(100*1000); /* XXX too big */
   11409 		timeout--;
   11410 	}
   11411 	if (rv != 0)
   11412 		goto out;
   11413 	switch (val) {
   11414 	case SFF_SFP_ID_SFF:
   11415 		aprint_normal_dev(sc->sc_dev,
   11416 		    "Module/Connector soldered to board\n");
   11417 		break;
   11418 	case SFF_SFP_ID_SFP:
   11419 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11420 		break;
   11421 	case SFF_SFP_ID_UNKNOWN:
   11422 		goto out;
   11423 	default:
   11424 		break;
   11425 	}
   11426 
   11427 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11428 	if (rv != 0) {
   11429 		goto out;
   11430 	}
   11431 
   11432 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11433 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11438 		sc->sc_flags |= WM_F_SGMII;
   11439 		mediatype = WM_MEDIATYPE_SERDES;
   11440 	}
   11441 
   11442 out:
   11443 	/* Restore I2C interface setting */
   11444 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11445 
   11446 	return mediatype;
   11447 }
   11448 
   11449 /*
   11450  * NVM related.
   11451  * Microwire, SPI (w/wo EERD) and Flash.
   11452  */
   11453 
/* Both SPI and Microwire */
   11455 
   11456 /*
   11457  * wm_eeprom_sendbits:
   11458  *
   11459  *	Send a series of bits to the EEPROM.
   11460  */
   11461 static void
   11462 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11463 {
   11464 	uint32_t reg;
   11465 	int x;
   11466 
   11467 	reg = CSR_READ(sc, WMREG_EECD);
   11468 
   11469 	for (x = nbits; x > 0; x--) {
   11470 		if (bits & (1U << (x - 1)))
   11471 			reg |= EECD_DI;
   11472 		else
   11473 			reg &= ~EECD_DI;
   11474 		CSR_WRITE(sc, WMREG_EECD, reg);
   11475 		CSR_WRITE_FLUSH(sc);
   11476 		delay(2);
   11477 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11478 		CSR_WRITE_FLUSH(sc);
   11479 		delay(2);
   11480 		CSR_WRITE(sc, WMREG_EECD, reg);
   11481 		CSR_WRITE_FLUSH(sc);
   11482 		delay(2);
   11483 	}
   11484 }
   11485 
   11486 /*
   11487  * wm_eeprom_recvbits:
   11488  *
   11489  *	Receive a series of bits from the EEPROM.
   11490  */
   11491 static void
   11492 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11493 {
   11494 	uint32_t reg, val;
   11495 	int x;
   11496 
   11497 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11498 
   11499 	val = 0;
   11500 	for (x = nbits; x > 0; x--) {
   11501 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11502 		CSR_WRITE_FLUSH(sc);
   11503 		delay(2);
   11504 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11505 			val |= (1U << (x - 1));
   11506 		CSR_WRITE(sc, WMREG_EECD, reg);
   11507 		CSR_WRITE_FLUSH(sc);
   11508 		delay(2);
   11509 	}
   11510 	*valp = val;
   11511 }
   11512 
   11513 /* Microwire */
   11514 
   11515 /*
   11516  * wm_nvm_read_uwire:
   11517  *
   11518  *	Read a word from the EEPROM using the MicroWire protocol.
   11519  */
   11520 static int
   11521 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11522 {
   11523 	uint32_t reg, val;
   11524 	int i;
   11525 
   11526 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11527 		device_xname(sc->sc_dev), __func__));
   11528 
   11529 	if (sc->nvm.acquire(sc) != 0)
   11530 		return -1;
   11531 
   11532 	for (i = 0; i < wordcnt; i++) {
   11533 		/* Clear SK and DI. */
   11534 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11535 		CSR_WRITE(sc, WMREG_EECD, reg);
   11536 
   11537 		/*
   11538 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11539 		 * and Xen.
   11540 		 *
   11541 		 * We use this workaround only for 82540 because qemu's
		 * e1000 acts as 82540.
   11543 		 */
   11544 		if (sc->sc_type == WM_T_82540) {
   11545 			reg |= EECD_SK;
   11546 			CSR_WRITE(sc, WMREG_EECD, reg);
   11547 			reg &= ~EECD_SK;
   11548 			CSR_WRITE(sc, WMREG_EECD, reg);
   11549 			CSR_WRITE_FLUSH(sc);
   11550 			delay(2);
   11551 		}
   11552 		/* XXX: end of workaround */
   11553 
   11554 		/* Set CHIP SELECT. */
   11555 		reg |= EECD_CS;
   11556 		CSR_WRITE(sc, WMREG_EECD, reg);
   11557 		CSR_WRITE_FLUSH(sc);
   11558 		delay(2);
   11559 
   11560 		/* Shift in the READ command. */
   11561 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11562 
   11563 		/* Shift in address. */
   11564 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11565 
   11566 		/* Shift out the data. */
   11567 		wm_eeprom_recvbits(sc, &val, 16);
   11568 		data[i] = val & 0xffff;
   11569 
   11570 		/* Clear CHIP SELECT. */
   11571 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11572 		CSR_WRITE(sc, WMREG_EECD, reg);
   11573 		CSR_WRITE_FLUSH(sc);
   11574 		delay(2);
   11575 	}
   11576 
   11577 	sc->nvm.release(sc);
   11578 	return 0;
   11579 }
   11580 
   11581 /* SPI */
   11582 
   11583 /*
   11584  * Set SPI and FLASH related information from the EECD register.
   11585  * For 82541 and 82547, the word size is taken from EEPROM.
   11586  */
   11587 static int
   11588 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11589 {
   11590 	int size;
   11591 	uint32_t reg;
   11592 	uint16_t data;
   11593 
   11594 	reg = CSR_READ(sc, WMREG_EECD);
   11595 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11596 
   11597 	/* Read the size of NVM from EECD by default */
   11598 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11599 	switch (sc->sc_type) {
   11600 	case WM_T_82541:
   11601 	case WM_T_82541_2:
   11602 	case WM_T_82547:
   11603 	case WM_T_82547_2:
   11604 		/* Set dummy value to access EEPROM */
   11605 		sc->sc_nvm_wordsize = 64;
   11606 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11607 			aprint_error_dev(sc->sc_dev,
   11608 			    "%s: failed to read EEPROM size\n", __func__);
   11609 		}
   11610 		reg = data;
   11611 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11612 		if (size == 0)
   11613 			size = 6; /* 64 word size */
   11614 		else
   11615 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11616 		break;
   11617 	case WM_T_80003:
   11618 	case WM_T_82571:
   11619 	case WM_T_82572:
   11620 	case WM_T_82573: /* SPI case */
   11621 	case WM_T_82574: /* SPI case */
   11622 	case WM_T_82583: /* SPI case */
   11623 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11624 		if (size > 14)
   11625 			size = 14;
   11626 		break;
   11627 	case WM_T_82575:
   11628 	case WM_T_82576:
   11629 	case WM_T_82580:
   11630 	case WM_T_I350:
   11631 	case WM_T_I354:
   11632 	case WM_T_I210:
   11633 	case WM_T_I211:
   11634 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11635 		if (size > 15)
   11636 			size = 15;
   11637 		break;
   11638 	default:
   11639 		aprint_error_dev(sc->sc_dev,
   11640 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   11643 	}
   11644 
   11645 	sc->sc_nvm_wordsize = 1 << size;
   11646 
   11647 	return 0;
   11648 }
   11649 
   11650 /*
   11651  * wm_nvm_ready_spi:
   11652  *
   11653  *	Wait for a SPI EEPROM to be ready for commands.
   11654  */
   11655 static int
   11656 wm_nvm_ready_spi(struct wm_softc *sc)
   11657 {
   11658 	uint32_t val;
   11659 	int usec;
   11660 
   11661 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11662 		device_xname(sc->sc_dev), __func__));
   11663 
   11664 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11665 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11666 		wm_eeprom_recvbits(sc, &val, 8);
   11667 		if ((val & SPI_SR_RDY) == 0)
   11668 			break;
   11669 	}
   11670 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   11672 		return -1;
   11673 	}
   11674 	return 0;
   11675 }
   11676 
   11677 /*
   11678  * wm_nvm_read_spi:
   11679  *
 *	Read a word from the EEPROM using the SPI protocol.
   11681  */
   11682 static int
   11683 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11684 {
   11685 	uint32_t reg, val;
   11686 	int i;
   11687 	uint8_t opc;
   11688 	int rv = 0;
   11689 
   11690 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11691 		device_xname(sc->sc_dev), __func__));
   11692 
   11693 	if (sc->nvm.acquire(sc) != 0)
   11694 		return -1;
   11695 
   11696 	/* Clear SK and CS. */
   11697 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11698 	CSR_WRITE(sc, WMREG_EECD, reg);
   11699 	CSR_WRITE_FLUSH(sc);
   11700 	delay(2);
   11701 
   11702 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11703 		goto out;
   11704 
   11705 	/* Toggle CS to flush commands. */
   11706 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11707 	CSR_WRITE_FLUSH(sc);
   11708 	delay(2);
   11709 	CSR_WRITE(sc, WMREG_EECD, reg);
   11710 	CSR_WRITE_FLUSH(sc);
   11711 	delay(2);
   11712 
   11713 	opc = SPI_OPC_READ;
   11714 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11715 		opc |= SPI_OPC_A8;
   11716 
   11717 	wm_eeprom_sendbits(sc, opc, 8);
   11718 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11719 
   11720 	for (i = 0; i < wordcnt; i++) {
   11721 		wm_eeprom_recvbits(sc, &val, 16);
   11722 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11723 	}
   11724 
   11725 	/* Raise CS and clear SK. */
   11726 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11727 	CSR_WRITE(sc, WMREG_EECD, reg);
   11728 	CSR_WRITE_FLUSH(sc);
   11729 	delay(2);
   11730 
   11731 out:
   11732 	sc->nvm.release(sc);
   11733 	return rv;
   11734 }
   11735 
/* Used with EERD */
   11737 
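/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD (or EEWR) register until the DONE bit is set,
 *	or until the retry limit is reached.
 */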
   11738 static int
   11739 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11740 {
   11741 	uint32_t attempts = 100000;
   11742 	uint32_t i, reg = 0;
   11743 	int32_t done = -1;
   11744 
   11745 	for (i = 0; i < attempts; i++) {
   11746 		reg = CSR_READ(sc, rw);
   11747 
   11748 		if (reg & EERD_DONE) {
   11749 			done = 0;
   11750 			break;
   11751 		}
   11752 		delay(5);
   11753 	}
   11754 
   11755 	return done;
   11756 }
   11757 
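/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the EEPROM using the EERD register.
 */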
   11758 static int
   11759 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11760     uint16_t *data)
   11761 {
   11762 	int i, eerd = 0;
   11763 	int rv = 0;
   11764 
   11765 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11766 		device_xname(sc->sc_dev), __func__));
   11767 
   11768 	if (sc->nvm.acquire(sc) != 0)
   11769 		return -1;
   11770 
   11771 	for (i = 0; i < wordcnt; i++) {
   11772 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11773 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11774 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11775 		if (rv != 0) {
   11776 			aprint_error_dev(sc->sc_dev, "EERD polling failed\n");
   11777 			break;
   11778 		}
   11779 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11780 	}
   11781 
   11782 	sc->nvm.release(sc);
   11783 	return rv;
   11784 }
   11785 
   11786 /* Flash */
   11787 
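/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Detect which flash bank (0 or 1) currently holds the valid
 *	NVM image.
 */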
   11788 static int
   11789 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11790 {
   11791 	uint32_t eecd;
   11792 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11793 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11794 	uint8_t sig_byte = 0;
   11795 
   11796 	switch (sc->sc_type) {
   11797 	case WM_T_PCH_SPT:
   11798 		/*
   11799 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11800 		 * sector valid bits from the NVM.
   11801 		 */
   11802 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11803 		if ((*bank == 0) || (*bank == 1)) {
   11804 			aprint_error_dev(sc->sc_dev,
   11805 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11806 				*bank);
   11807 			return -1;
   11808 		} else {
   11809 			*bank = *bank - 2;
   11810 			return 0;
   11811 		}
   11812 	case WM_T_ICH8:
   11813 	case WM_T_ICH9:
   11814 		eecd = CSR_READ(sc, WMREG_EECD);
   11815 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11816 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11817 			return 0;
   11818 		}
   11819 		/* FALLTHROUGH */
   11820 	default:
   11821 		/* Default to 0 */
   11822 		*bank = 0;
   11823 
   11824 		/* Check bank 0 */
   11825 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11826 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11827 			*bank = 0;
   11828 			return 0;
   11829 		}
   11830 
   11831 		/* Check bank 1 */
   11832 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11833 		    &sig_byte);
   11834 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11835 			*bank = 1;
   11836 			return 0;
   11837 		}
   11838 	}
   11839 
   11840 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11841 		device_xname(sc->sc_dev)));
   11842 	return -1;
   11843 }
   11844 
   11845 /******************************************************************************
   11846  * This function does initial flash setup so that a new read/write/erase cycle
   11847  * can be started.
   11848  *
   11849  * sc - The pointer to the hw structure
   11850  ****************************************************************************/
   11851 static int32_t
   11852 wm_ich8_cycle_init(struct wm_softc *sc)
   11853 {
   11854 	uint16_t hsfsts;
   11855 	int32_t error = 1;
   11856 	int32_t i     = 0;
   11857 
   11858 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11859 
	/* Check the Flash Descriptor Valid bit in the HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
   11864 
	/* Clear FCERR and DAEL in the HW status by writing 1s */
   11867 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11868 
   11869 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11870 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads as 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has been completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that access to those bits by two
	 * threads is serialized, or some way to prevent two threads from
	 * starting a cycle at the same time.
	 */
   11881 
   11882 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11883 		/*
   11884 		 * There is no cycle running at present, so we can start a
   11885 		 * cycle
   11886 		 */
   11887 
   11888 		/* Begin by setting Flash Cycle Done. */
   11889 		hsfsts |= HSFSTS_DONE;
   11890 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11891 		error = 0;
   11892 	} else {
   11893 		/*
		 * Otherwise, poll for some time so the current cycle has a
		 * chance to end before giving up.
   11896 		 */
   11897 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11898 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11899 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11900 				error = 0;
   11901 				break;
   11902 			}
   11903 			delay(1);
   11904 		}
   11905 		if (error == 0) {
   11906 			/*
			 * The previous cycle ended within the timeout;
			 * now set the Flash Cycle Done bit.
   11909 			 */
   11910 			hsfsts |= HSFSTS_DONE;
   11911 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11912 		}
   11913 	}
   11914 	return error;
   11915 }
   11916 
   11917 /******************************************************************************
   11918  * This function starts a flash cycle and waits for its completion
   11919  *
   11920  * sc - The pointer to the hw structure
   11921  ****************************************************************************/
   11922 static int32_t
   11923 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11924 {
   11925 	uint16_t hsflctl;
   11926 	uint16_t hsfsts;
   11927 	int32_t error = 1;
   11928 	uint32_t i = 0;
   11929 
   11930 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11931 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11932 	hsflctl |= HSFCTL_GO;
   11933 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11934 
   11935 	/* Wait till FDONE bit is set to 1 */
   11936 	do {
   11937 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11938 		if (hsfsts & HSFSTS_DONE)
   11939 			break;
   11940 		delay(1);
   11941 		i++;
   11942 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11944 		error = 0;
   11945 
   11946 	return error;
   11947 }
   11948 
   11949 /******************************************************************************
   11950  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11951  *
   11952  * sc - The pointer to the hw structure
   11953  * index - The index of the byte or word to read.
   11954  * size - Size of data to read, 1=byte 2=word, 4=dword
   11955  * data - Pointer to the word to store the value read.
   11956  *****************************************************************************/
   11957 static int32_t
   11958 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11959     uint32_t size, uint32_t *data)
   11960 {
   11961 	uint16_t hsfsts;
   11962 	uint16_t hsflctl;
   11963 	uint32_t flash_linear_address;
   11964 	uint32_t flash_data = 0;
   11965 	int32_t error = 1;
   11966 	int32_t count = 0;
   11967 
	if (size < 1 || size > 4 || data == NULL ||
   11969 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11970 		return error;
   11971 
   11972 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11973 	    sc->sc_ich8_flash_base;
   11974 
   11975 	do {
   11976 		delay(1);
   11977 		/* Steps */
   11978 		error = wm_ich8_cycle_init(sc);
   11979 		if (error)
   11980 			break;
   11981 
   11982 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The BCOUNT field holds the transfer size minus one. */
   11984 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11985 		    & HSFCTL_BCOUNT_MASK;
   11986 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11987 		if (sc->sc_type == WM_T_PCH_SPT) {
   11988 			/*
   11989 			 * In SPT, This register is in Lan memory space, not
   11990 			 * flash. Therefore, only 32 bit access is supported.
   11991 			 */
   11992 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11993 			    (uint32_t)hsflctl);
   11994 		} else
   11995 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11996 
   11997 		/*
   11998 		 * Write the last 24 bits of index into Flash Linear address
   11999 		 * field in Flash Address
   12000 		 */
   12001 		/* TODO: TBD maybe check the index against the size of flash */
   12002 
   12003 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12004 
   12005 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12006 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the data from the
		 * Flash Data0 register (least significant byte first).
		 */
   12013 		if (error == 0) {
   12014 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12015 			if (size == 1)
   12016 				*data = (uint8_t)(flash_data & 0x000000FF);
   12017 			else if (size == 2)
   12018 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12019 			else if (size == 4)
   12020 				*data = (uint32_t)flash_data;
   12021 			break;
   12022 		} else {
   12023 			/*
   12024 			 * If we've gotten here, then things are probably
   12025 			 * completely hosed, but if the error condition is
   12026 			 * detected, it won't hurt to give it another try...
   12027 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12028 			 */
   12029 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12030 			if (hsfsts & HSFSTS_ERR) {
   12031 				/* Repeat for some time before giving up. */
   12032 				continue;
   12033 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12034 				break;
   12035 		}
   12036 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12037 
   12038 	return error;
   12039 }
   12040 
   12041 /******************************************************************************
   12042  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12043  *
   12044  * sc - pointer to wm_hw structure
   12045  * index - The index of the byte to read.
   12046  * data - Pointer to a byte to store the value read.
   12047  *****************************************************************************/
   12048 static int32_t
   12049 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12050 {
   12051 	int32_t status;
   12052 	uint32_t word = 0;
   12053 
   12054 	status = wm_read_ich8_data(sc, index, 1, &word);
   12055 	if (status == 0)
   12056 		*data = (uint8_t)word;
   12057 	else
   12058 		*data = 0;
   12059 
   12060 	return status;
   12061 }
   12062 
   12063 /******************************************************************************
   12064  * Reads a word from the NVM using the ICH8 flash access registers.
   12065  *
   12066  * sc - pointer to wm_hw structure
   12067  * index - The starting byte index of the word to read.
   12068  * data - Pointer to a word to store the value read.
   12069  *****************************************************************************/
   12070 static int32_t
   12071 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12072 {
   12073 	int32_t status;
   12074 	uint32_t word = 0;
   12075 
   12076 	status = wm_read_ich8_data(sc, index, 2, &word);
   12077 	if (status == 0)
   12078 		*data = (uint16_t)word;
   12079 	else
   12080 		*data = 0;
   12081 
   12082 	return status;
   12083 }
   12084 
   12085 /******************************************************************************
   12086  * Reads a dword from the NVM using the ICH8 flash access registers.
   12087  *
   12088  * sc - pointer to wm_hw structure
   12089  * index - The starting byte index of the word to read.
   12090  * data - Pointer to a word to store the value read.
   12091  *****************************************************************************/
   12092 static int32_t
   12093 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12094 {
   12095 	int32_t status;
   12096 
   12097 	status = wm_read_ich8_data(sc, index, 4, data);
   12098 	return status;
   12099 }
   12100 
   12101 /******************************************************************************
   12102  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12103  * register.
   12104  *
   12105  * sc - Struct containing variables accessed by shared code
   12106  * offset - offset of word in the EEPROM to read
   12107  * data - word read from the EEPROM
   12108  * words - number of words to read
   12109  *****************************************************************************/
   12110 static int
   12111 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12112 {
   12113 	int32_t  rv = 0;
   12114 	uint32_t flash_bank = 0;
   12115 	uint32_t act_offset = 0;
   12116 	uint32_t bank_offset = 0;
   12117 	uint16_t word = 0;
   12118 	uint16_t i = 0;
   12119 
   12120 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12121 		device_xname(sc->sc_dev), __func__));
   12122 
   12123 	if (sc->nvm.acquire(sc) != 0)
   12124 		return -1;
   12125 
   12126 	/*
   12127 	 * We need to know which is the valid flash bank.  In the event
   12128 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12129 	 * managing flash_bank.  So it cannot be trusted and needs
   12130 	 * to be updated with each read.
   12131 	 */
   12132 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12133 	if (rv) {
   12134 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12135 			device_xname(sc->sc_dev)));
   12136 		flash_bank = 0;
   12137 	}
   12138 
   12139 	/*
   12140 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12141 	 * size
   12142 	 */
   12143 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12144 
   12145 	for (i = 0; i < words; i++) {
   12146 		/* The NVM part needs a byte offset, hence * 2 */
   12147 		act_offset = bank_offset + ((offset + i) * 2);
   12148 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12149 		if (rv) {
   12150 			aprint_error_dev(sc->sc_dev,
   12151 			    "%s: failed to read NVM\n", __func__);
   12152 			break;
   12153 		}
   12154 		data[i] = word;
   12155 	}
   12156 
   12157 	sc->nvm.release(sc);
   12158 	return rv;
   12159 }
   12160 
   12161 /******************************************************************************
   12162  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12163  * register.
   12164  *
   12165  * sc - Struct containing variables accessed by shared code
   12166  * offset - offset of word in the EEPROM to read
   12167  * data - word read from the EEPROM
   12168  * words - number of words to read
   12169  *****************************************************************************/
   12170 static int
   12171 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12172 {
   12173 	int32_t  rv = 0;
   12174 	uint32_t flash_bank = 0;
   12175 	uint32_t act_offset = 0;
   12176 	uint32_t bank_offset = 0;
   12177 	uint32_t dword = 0;
   12178 	uint16_t i = 0;
   12179 
   12180 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12181 		device_xname(sc->sc_dev), __func__));
   12182 
   12183 	if (sc->nvm.acquire(sc) != 0)
   12184 		return -1;
   12185 
   12186 	/*
   12187 	 * We need to know which is the valid flash bank.  In the event
   12188 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12189 	 * managing flash_bank.  So it cannot be trusted and needs
   12190 	 * to be updated with each read.
   12191 	 */
   12192 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12193 	if (rv) {
   12194 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12195 			device_xname(sc->sc_dev)));
   12196 		flash_bank = 0;
   12197 	}
   12198 
   12199 	/*
   12200 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12201 	 * size
   12202 	 */
   12203 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12204 
   12205 	for (i = 0; i < words; i++) {
   12206 		/* The NVM part needs a byte offset, hence * 2 */
   12207 		act_offset = bank_offset + ((offset + i) * 2);
   12208 		/* but we must read dword aligned, so mask ... */
   12209 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12210 		if (rv) {
   12211 			aprint_error_dev(sc->sc_dev,
   12212 			    "%s: failed to read NVM\n", __func__);
   12213 			break;
   12214 		}
   12215 		/* ... and pick out low or high word */
   12216 		if ((act_offset & 0x2) == 0)
   12217 			data[i] = (uint16_t)(dword & 0xFFFF);
   12218 		else
   12219 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12220 	}
   12221 
   12222 	sc->nvm.release(sc);
   12223 	return rv;
   12224 }
   12225 
   12226 /* iNVM */
   12227 
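/*
 * wm_nvm_read_word_invm:
 *
 *	Read a word from the iNVM by scanning the autoload records for
 *	the given word address.
 */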
   12228 static int
   12229 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12230 {
   12231 	int32_t  rv = 0;
   12232 	uint32_t invm_dword;
   12233 	uint16_t i;
   12234 	uint8_t record_type, word_address;
   12235 
   12236 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12237 		device_xname(sc->sc_dev), __func__));
   12238 
   12239 	for (i = 0; i < INVM_SIZE; i++) {
   12240 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12241 		/* Get record type */
   12242 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12243 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12244 			break;
   12245 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12246 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12247 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12248 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12249 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12250 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12251 			if (word_address == address) {
   12252 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12253 				rv = 0;
   12254 				break;
   12255 			}
   12256 		}
   12257 	}
   12258 
   12259 	return rv;
   12260 }
   12261 
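/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM, falling back to the documented default
 *	values for words which are not programmed.
 */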
   12262 static int
   12263 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12264 {
   12265 	int rv = 0;
   12266 	int i;
   12267 
   12268 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12269 		device_xname(sc->sc_dev), __func__));
   12270 
   12271 	if (sc->nvm.acquire(sc) != 0)
   12272 		return -1;
   12273 
   12274 	for (i = 0; i < words; i++) {
   12275 		switch (offset + i) {
   12276 		case NVM_OFF_MACADDR:
   12277 		case NVM_OFF_MACADDR1:
   12278 		case NVM_OFF_MACADDR2:
   12279 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12280 			if (rv != 0) {
   12281 				data[i] = 0xffff;
   12282 				rv = -1;
   12283 			}
   12284 			break;
   12285 		case NVM_OFF_CFG2:
   12286 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12287 			if (rv != 0) {
   12288 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12289 				rv = 0;
   12290 			}
   12291 			break;
   12292 		case NVM_OFF_CFG4:
   12293 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12294 			if (rv != 0) {
   12295 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12296 				rv = 0;
   12297 			}
   12298 			break;
   12299 		case NVM_OFF_LED_1_CFG:
   12300 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12301 			if (rv != 0) {
   12302 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12303 				rv = 0;
   12304 			}
   12305 			break;
   12306 		case NVM_OFF_LED_0_2_CFG:
   12307 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12308 			if (rv != 0) {
   12309 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12310 				rv = 0;
   12311 			}
   12312 			break;
   12313 		case NVM_OFF_ID_LED_SETTINGS:
   12314 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12315 			if (rv != 0) {
   12316 				*data = ID_LED_RESERVED_FFFF;
   12317 				rv = 0;
   12318 			}
   12319 			break;
   12320 		default:
   12321 			DPRINTF(WM_DEBUG_NVM,
   12322 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12323 			*data = NVM_RESERVED_WORD;
   12324 			break;
   12325 		}
   12326 	}
   12327 
   12328 	sc->nvm.release(sc);
   12329 	return rv;
   12330 }
   12331 
/* Lock, detect NVM type, validate checksum, get version and read */
   12333 
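/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 0 if the device's NVM is Flash (possible on 82573, 82574
 *	and 82583 only), otherwise 1.
 */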
   12334 static int
   12335 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12336 {
   12337 	uint32_t eecd = 0;
   12338 
   12339 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12340 	    || sc->sc_type == WM_T_82583) {
   12341 		eecd = CSR_READ(sc, WMREG_EECD);
   12342 
   12343 		/* Isolate bits 15 & 16 */
   12344 		eecd = ((eecd >> 15) & 0x03);
   12345 
   12346 		/* If both bits are set, device is Flash type */
   12347 		if (eecd == 0x03)
   12348 			return 0;
   12349 	}
   12350 	return 1;
   12351 }
   12352 
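/*
 * wm_nvm_get_flash_presence_i210:
 *
 *	Return 1 if an external Flash is detected (EEC_FLASH_DETECTED)
 *	on I210/I211, otherwise 0.
 */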
   12353 static int
   12354 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12355 {
   12356 	uint32_t eec;
   12357 
   12358 	eec = CSR_READ(sc, WMREG_EEC);
   12359 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12360 		return 1;
   12361 
   12362 	return 0;
   12363 }
   12364 
   12365 /*
   12366  * wm_nvm_validate_checksum
   12367  *
   12368  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12369  */
   12370 static int
   12371 wm_nvm_validate_checksum(struct wm_softc *sc)
   12372 {
   12373 	uint16_t checksum;
   12374 	uint16_t eeprom_data;
   12375 #ifdef WM_DEBUG
   12376 	uint16_t csum_wordaddr, valid_checksum;
   12377 #endif
   12378 	int i;
   12379 
   12380 	checksum = 0;
   12381 
   12382 	/* Don't check for I211 */
   12383 	if (sc->sc_type == WM_T_I211)
   12384 		return 0;
   12385 
   12386 #ifdef WM_DEBUG
   12387 	if (sc->sc_type == WM_T_PCH_LPT) {
   12388 		csum_wordaddr = NVM_OFF_COMPAT;
   12389 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12390 	} else {
   12391 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12392 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12393 	}
   12394 
   12395 	/* Dump EEPROM image for debug */
   12396 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12397 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12398 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12399 		/* XXX PCH_SPT? */
   12400 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12401 		if ((eeprom_data & valid_checksum) == 0) {
   12402 			DPRINTF(WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12404 				device_xname(sc->sc_dev), eeprom_data,
   12405 				    valid_checksum));
   12406 		}
   12407 	}
   12408 
   12409 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12410 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12411 		for (i = 0; i < NVM_SIZE; i++) {
   12412 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12413 				printf("XXXX ");
   12414 			else
   12415 				printf("%04hx ", eeprom_data);
   12416 			if (i % 8 == 7)
   12417 				printf("\n");
   12418 		}
   12419 	}
   12420 
   12421 #endif /* WM_DEBUG */
   12422 
   12423 	for (i = 0; i < NVM_SIZE; i++) {
   12424 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12425 			return 1;
   12426 		checksum += eeprom_data;
   12427 	}
   12428 
   12429 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12430 #ifdef WM_DEBUG
   12431 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12432 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12433 #endif
   12434 	}
   12435 
   12436 	return 0;
   12437 }
   12438 
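/*
 * wm_nvm_version_invm:
 *
 *	Get the iNVM image version from word 61 (INVM_DATA_REG(61)).
 */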
   12439 static void
   12440 wm_nvm_version_invm(struct wm_softc *sc)
   12441 {
   12442 	uint32_t dword;
   12443 
   12444 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect though...
   12448 	 *
   12449 	 * Example:
   12450 	 *
   12451 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12452 	 */
   12453 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12454 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12455 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12456 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12457 }
   12458 
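/*
 * wm_nvm_version:
 *
 *	Get the NVM image version, the option ROM version and the image
 *	unique ID, and print whatever is available.
 */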
   12459 static void
   12460 wm_nvm_version(struct wm_softc *sc)
   12461 {
   12462 	uint16_t major, minor, build, patch;
   12463 	uint16_t uid0, uid1;
   12464 	uint16_t nvm_data;
   12465 	uint16_t off;
   12466 	bool check_version = false;
   12467 	bool check_optionrom = false;
   12468 	bool have_build = false;
   12469 	bool have_uid = true;
   12470 
   12471 	/*
   12472 	 * Version format:
   12473 	 *
   12474 	 * XYYZ
   12475 	 * X0YZ
   12476 	 * X0YY
   12477 	 *
   12478 	 * Example:
   12479 	 *
   12480 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12481 	 *	82571	0x50a6	5.10.6?
   12482 	 *	82572	0x506a	5.6.10?
   12483 	 *	82572EI	0x5069	5.6.9?
   12484 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12485 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12487 	 */
   12488 
   12489 	/*
   12490 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12493 	 */
   12494 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12495 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12496 		have_uid = false;
   12497 
   12498 	switch (sc->sc_type) {
   12499 	case WM_T_82571:
   12500 	case WM_T_82572:
   12501 	case WM_T_82574:
   12502 	case WM_T_82583:
   12503 		check_version = true;
   12504 		check_optionrom = true;
   12505 		have_build = true;
   12506 		break;
   12507 	case WM_T_82575:
   12508 	case WM_T_82576:
   12509 	case WM_T_82580:
   12510 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12511 			check_version = true;
   12512 		break;
   12513 	case WM_T_I211:
   12514 		wm_nvm_version_invm(sc);
   12515 		have_uid = false;
   12516 		goto printver;
   12517 	case WM_T_I210:
   12518 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12519 			wm_nvm_version_invm(sc);
   12520 			have_uid = false;
   12521 			goto printver;
   12522 		}
   12523 		/* FALLTHROUGH */
   12524 	case WM_T_I350:
   12525 	case WM_T_I354:
   12526 		check_version = true;
   12527 		check_optionrom = true;
   12528 		break;
   12529 	default:
   12530 		return;
   12531 	}
   12532 	if (check_version
   12533 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12534 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12535 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12536 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12537 			build = nvm_data & NVM_BUILD_MASK;
   12538 			have_build = true;
   12539 		} else
   12540 			minor = nvm_data & 0x00ff;
   12541 
		/* Convert minor from BCD to decimal */
   12543 		minor = (minor / 16) * 10 + (minor % 16);
   12544 		sc->sc_nvm_ver_major = major;
   12545 		sc->sc_nvm_ver_minor = minor;
   12546 
   12547 printver:
   12548 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12549 		    sc->sc_nvm_ver_minor);
   12550 		if (have_build) {
   12551 			sc->sc_nvm_ver_build = build;
   12552 			aprint_verbose(".%d", build);
   12553 		}
   12554 	}
   12555 
	/* Assume the Option ROM area is above NVM_SIZE */
   12557 	if ((sc->sc_nvm_wordsize >= NVM_SIZE) && check_optionrom
   12558 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12559 		/* Option ROM Version */
   12560 		if ((off != 0x0000) && (off != 0xffff)) {
   12561 			int rv;
   12562 
   12563 			off += NVM_COMBO_VER_OFF;
   12564 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12565 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12566 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12567 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12568 				/* 16bits */
   12569 				major = uid0 >> 8;
   12570 				build = (uid0 << 8) | (uid1 >> 8);
   12571 				patch = uid1 & 0x00ff;
   12572 				aprint_verbose(", option ROM Version %d.%d.%d",
   12573 				    major, build, patch);
   12574 			}
   12575 		}
   12576 	}
   12577 
   12578 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12579 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12580 }
   12581 
   12582 /*
   12583  * wm_nvm_read:
   12584  *
   12585  *	Read data from the serial EEPROM.
   12586  */
   12587 static int
   12588 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12589 {
   12590 	int rv;
   12591 
   12592 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12593 		device_xname(sc->sc_dev), __func__));
   12594 
   12595 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12596 		return -1;
   12597 
   12598 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12599 
   12600 	return rv;
   12601 }
   12602 
   12603 /*
   12604  * Hardware semaphores.
 * Very complex...
   12606  */
   12607 
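/*
 * wm_get_null, wm_put_null:
 *
 *	No-op acquire/release functions used when no semaphore is
 *	required to access a resource.
 */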
   12608 static int
   12609 wm_get_null(struct wm_softc *sc)
   12610 {
   12611 
   12612 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12613 		device_xname(sc->sc_dev), __func__));
   12614 	return 0;
   12615 }
   12616 
   12617 static void
   12618 wm_put_null(struct wm_softc *sc)
   12619 {
   12620 
   12621 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12622 		device_xname(sc->sc_dev), __func__));
   12623 	return;
   12624 }
   12625 
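/*
 * wm_get_eecd:
 *
 *	Request direct software access to the EEPROM by setting
 *	EECD_EE_REQ and waiting for the grant bit.
 */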
   12626 static int
   12627 wm_get_eecd(struct wm_softc *sc)
   12628 {
   12629 	uint32_t reg;
   12630 	int x;
   12631 
   12632 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12633 		device_xname(sc->sc_dev), __func__));
   12634 
   12635 	reg = CSR_READ(sc, WMREG_EECD);
   12636 
   12637 	/* Request EEPROM access. */
   12638 	reg |= EECD_EE_REQ;
   12639 	CSR_WRITE(sc, WMREG_EECD, reg);
   12640 
   12641 	/* ..and wait for it to be granted. */
   12642 	for (x = 0; x < 1000; x++) {
   12643 		reg = CSR_READ(sc, WMREG_EECD);
   12644 		if (reg & EECD_EE_GNT)
   12645 			break;
   12646 		delay(5);
   12647 	}
   12648 	if ((reg & EECD_EE_GNT) == 0) {
   12649 		aprint_error_dev(sc->sc_dev,
   12650 		    "could not acquire EEPROM GNT\n");
   12651 		reg &= ~EECD_EE_REQ;
   12652 		CSR_WRITE(sc, WMREG_EECD, reg);
   12653 		return -1;
   12654 	}
   12655 
   12656 	return 0;
   12657 }
   12658 
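/*
 * wm_nvm_eec_clock_raise, wm_nvm_eec_clock_lower:
 *
 *	Raise or lower the EEPROM's serial clock (SK) line.
 */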
   12659 static void
   12660 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12661 {
   12662 
   12663 	*eecd |= EECD_SK;
   12664 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12665 	CSR_WRITE_FLUSH(sc);
   12666 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12667 		delay(1);
   12668 	else
   12669 		delay(50);
   12670 }
   12671 
   12672 static void
   12673 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12674 {
   12675 
   12676 	*eecd &= ~EECD_SK;
   12677 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12678 	CSR_WRITE_FLUSH(sc);
   12679 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12680 		delay(1);
   12681 	else
   12682 		delay(50);
   12683 }
   12684 
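/*
 * wm_put_eecd:
 *
 *	Stop the EEPROM and release direct access by clearing
 *	EECD_EE_REQ.
 */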
   12685 static void
   12686 wm_put_eecd(struct wm_softc *sc)
   12687 {
   12688 	uint32_t reg;
   12689 
   12690 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12691 		device_xname(sc->sc_dev), __func__));
   12692 
   12693 	/* Stop nvm */
   12694 	reg = CSR_READ(sc, WMREG_EECD);
   12695 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12696 		/* Pull CS high */
   12697 		reg |= EECD_CS;
   12698 		wm_nvm_eec_clock_lower(sc, &reg);
   12699 	} else {
   12700 		/* CS on Microwire is active-high */
   12701 		reg &= ~(EECD_CS | EECD_DI);
   12702 		CSR_WRITE(sc, WMREG_EECD, reg);
   12703 		wm_nvm_eec_clock_raise(sc, &reg);
   12704 		wm_nvm_eec_clock_lower(sc, &reg);
   12705 	}
   12706 
   12707 	reg = CSR_READ(sc, WMREG_EECD);
   12708 	reg &= ~EECD_EE_REQ;
   12709 	CSR_WRITE(sc, WMREG_EECD, reg);
   12710 
   12711 	return;
   12712 }
   12713 
   12714 /*
   12715  * Get hardware semaphore.
   12716  * Same as e1000_get_hw_semaphore_generic()
   12717  */
   12718 static int
   12719 wm_get_swsm_semaphore(struct wm_softc *sc)
   12720 {
   12721 	int32_t timeout;
   12722 	uint32_t swsm;
   12723 
   12724 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12725 		device_xname(sc->sc_dev), __func__));
   12726 	KASSERT(sc->sc_nvm_wordsize > 0);
   12727 
   12728 retry:
   12729 	/* Get the SW semaphore. */
   12730 	timeout = sc->sc_nvm_wordsize + 1;
   12731 	while (timeout) {
   12732 		swsm = CSR_READ(sc, WMREG_SWSM);
   12733 
   12734 		if ((swsm & SWSM_SMBI) == 0)
   12735 			break;
   12736 
   12737 		delay(50);
   12738 		timeout--;
   12739 	}
   12740 
   12741 	if (timeout == 0) {
   12742 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12743 			/*
   12744 			 * In rare circumstances, the SW semaphore may already
   12745 			 * be held unintentionally. Clear the semaphore once
   12746 			 * before giving up.
   12747 			 */
   12748 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   12749 			wm_put_swsm_semaphore(sc);
   12750 			goto retry;
   12751 		}
   12752 		aprint_error_dev(sc->sc_dev,
   12753 		    "could not acquire SWSM SMBI\n");
   12754 		return 1;
   12755 	}
   12756 
   12757 	/* Get the FW semaphore. */
   12758 	timeout = sc->sc_nvm_wordsize + 1;
   12759 	while (timeout) {
   12760 		swsm = CSR_READ(sc, WMREG_SWSM);
   12761 		swsm |= SWSM_SWESMBI;
   12762 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12763 		/* If we managed to set the bit we got the semaphore. */
   12764 		swsm = CSR_READ(sc, WMREG_SWSM);
   12765 		if (swsm & SWSM_SWESMBI)
   12766 			break;
   12767 
   12768 		delay(50);
   12769 		timeout--;
   12770 	}
   12771 
   12772 	if (timeout == 0) {
   12773 		aprint_error_dev(sc->sc_dev,
   12774 		    "could not acquire SWSM SWESMBI\n");
   12775 		/* Release semaphores */
   12776 		wm_put_swsm_semaphore(sc);
   12777 		return 1;
   12778 	}
   12779 	return 0;
   12780 }
   12781 
   12782 /*
   12783  * Put hardware semaphore.
   12784  * Same as e1000_put_hw_semaphore_generic()
   12785  */
   12786 static void
   12787 wm_put_swsm_semaphore(struct wm_softc *sc)
   12788 {
   12789 	uint32_t swsm;
   12790 
   12791 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12792 		device_xname(sc->sc_dev), __func__));
   12793 
   12794 	swsm = CSR_READ(sc, WMREG_SWSM);
   12795 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12796 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12797 }
   12798 
   12799 /*
   12800  * Get SW/FW semaphore.
   12801  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12802  */
   12803 static int
   12804 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12805 {
   12806 	uint32_t swfw_sync;
   12807 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12808 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int i, timeout;
   12810 
   12811 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12812 		device_xname(sc->sc_dev), __func__));
   12813 
   12814 	if (sc->sc_type == WM_T_80003)
   12815 		timeout = 50;
   12816 	else
   12817 		timeout = 200;
   12818 
	for (i = 0; i < timeout; i++) {
   12820 		if (wm_get_swsm_semaphore(sc)) {
   12821 			aprint_error_dev(sc->sc_dev,
   12822 			    "%s: failed to get semaphore\n",
   12823 			    __func__);
   12824 			return 1;
   12825 		}
   12826 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12827 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12828 			swfw_sync |= swmask;
   12829 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12830 			wm_put_swsm_semaphore(sc);
   12831 			return 0;
   12832 		}
   12833 		wm_put_swsm_semaphore(sc);
   12834 		delay(5000);
   12835 	}
   12836 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12837 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12838 	return 1;
   12839 }
   12840 
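/*
 * Put SW/FW semaphore.
 * Same as e1000_release_swfw_sync_{80003es2lan,82575}().
 */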
   12841 static void
   12842 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12843 {
   12844 	uint32_t swfw_sync;
   12845 
   12846 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12847 		device_xname(sc->sc_dev), __func__));
   12848 
   12849 	while (wm_get_swsm_semaphore(sc) != 0)
   12850 		continue;
   12851 
   12852 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12853 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12854 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12855 
   12856 	wm_put_swsm_semaphore(sc);
   12857 }
   12858 
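/*
 * wm_get_nvm_80003:
 *
 *	Get the NVM related semaphores (SW/FW and, if required, EECD)
 *	on 80003.
 */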
   12859 static int
   12860 wm_get_nvm_80003(struct wm_softc *sc)
   12861 {
   12862 	int rv;
   12863 
   12864 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12865 		device_xname(sc->sc_dev), __func__));
   12866 
   12867 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   12868 		aprint_error_dev(sc->sc_dev,
   12869 		    "%s: failed to get semaphore(SWFW)\n",
   12870 		    __func__);
   12871 		return rv;
   12872 	}
   12873 
   12874 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12875 	    && (rv = wm_get_eecd(sc)) != 0) {
   12876 		aprint_error_dev(sc->sc_dev,
   12877 		    "%s: failed to get semaphore(EECD)\n",
   12878 		    __func__);
   12879 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12880 		return rv;
   12881 	}
   12882 
   12883 	return 0;
   12884 }
   12885 
   12886 static void
   12887 wm_put_nvm_80003(struct wm_softc *sc)
   12888 {
   12889 
   12890 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12891 		device_xname(sc->sc_dev), __func__));
   12892 
   12893 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12894 		wm_put_eecd(sc);
   12895 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12896 }
   12897 
   12898 static int
   12899 wm_get_nvm_82571(struct wm_softc *sc)
   12900 {
   12901 	int rv;
   12902 
   12903 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12904 		device_xname(sc->sc_dev), __func__));
   12905 
   12906 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   12907 		return rv;
   12908 
   12909 	switch (sc->sc_type) {
   12910 	case WM_T_82573:
   12911 		break;
   12912 	default:
   12913 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12914 			rv = wm_get_eecd(sc);
   12915 		break;
   12916 	}
   12917 
   12918 	if (rv != 0) {
   12919 		aprint_error_dev(sc->sc_dev,
   12920 		    "%s: failed to get semaphore\n",
   12921 		    __func__);
   12922 		wm_put_swsm_semaphore(sc);
   12923 	}
   12924 
   12925 	return rv;
   12926 }
   12927 
   12928 static void
   12929 wm_put_nvm_82571(struct wm_softc *sc)
   12930 {
   12931 
   12932 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12933 		device_xname(sc->sc_dev), __func__));
   12934 
   12935 	switch (sc->sc_type) {
   12936 	case WM_T_82573:
   12937 		break;
   12938 	default:
   12939 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12940 			wm_put_eecd(sc);
   12941 		break;
   12942 	}
   12943 
   12944 	wm_put_swsm_semaphore(sc);
   12945 }
   12946 
   12947 static int
   12948 wm_get_phy_82575(struct wm_softc *sc)
   12949 {
   12950 
   12951 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12952 		device_xname(sc->sc_dev), __func__));
   12953 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12954 }
   12955 
   12956 static void
   12957 wm_put_phy_82575(struct wm_softc *sc)
   12958 {
   12959 
   12960 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12961 		device_xname(sc->sc_dev), __func__));
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12963 }
   12964 
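/*
 * Get the software/firmware semaphore by taking MDIO ownership in the
 * EXTCNFCTR register.  The PHY mutex taken here guards both PHY and
 * NVM access.
 */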
   12965 static int
   12966 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12967 {
   12968 	uint32_t ext_ctrl;
	int timeout;
   12970 
   12971 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12972 		device_xname(sc->sc_dev), __func__));
   12973 
   12974 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12975 	for (timeout = 0; timeout < 200; timeout++) {
   12976 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12977 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12978 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12979 
   12980 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12981 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12982 			return 0;
   12983 		delay(5000);
   12984 	}
   12985 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12986 	    device_xname(sc->sc_dev), ext_ctrl);
   12987 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12988 	return 1;
   12989 }
   12990 
   12991 static void
   12992 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12993 {
   12994 	uint32_t ext_ctrl;
   12995 
   12996 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12997 		device_xname(sc->sc_dev), __func__));
   12998 
   12999 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13000 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13001 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13002 
   13003 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13004 }
   13005 
   13006 static int
   13007 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13008 {
   13009 	uint32_t ext_ctrl;
   13010 	int timeout;
   13011 
   13012 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13013 		device_xname(sc->sc_dev), __func__));
   13014 	mutex_enter(sc->sc_ich_phymtx);
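          	/* Wait for any previous owner to release the MDIO ownership flag */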
   13015 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13016 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13017 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13018 			break;
   13019 		delay(1000);
   13020 	}
   13021 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13022 		printf("%s: SW has already locked the resource\n",
   13023 		    device_xname(sc->sc_dev));
   13024 		goto out;
   13025 	}
   13026 
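          	/* Claim ownership and verify that the bit sticks (up to 1 second) */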
   13027 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13028 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13029 	for (timeout = 0; timeout < 1000; timeout++) {
   13030 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13031 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13032 			break;
   13033 		delay(1000);
   13034 	}
   13035 	if (timeout >= 1000) {
   13036 		printf("%s: failed to acquire semaphore\n",
   13037 		    device_xname(sc->sc_dev));
   13038 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13039 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13040 		goto out;
   13041 	}
   13042 	return 0;
   13043 
   13044 out:
   13045 	mutex_exit(sc->sc_ich_phymtx);
   13046 	return 1;
   13047 }
   13048 
   13049 static void
   13050 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13051 {
   13052 	uint32_t ext_ctrl;
   13053 
   13054 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13055 		device_xname(sc->sc_dev), __func__));
   13056 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13057 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13058 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13059 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13060 	} else {
   13061 		printf("%s: Semaphore unexpectedly released\n",
   13062 		    device_xname(sc->sc_dev));
   13063 	}
   13064 
   13065 	mutex_exit(sc->sc_ich_phymtx);
   13066 }
   13067 
   13068 static int
   13069 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13070 {
   13071 
   13072 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13073 		device_xname(sc->sc_dev), __func__));
   13074 	mutex_enter(sc->sc_ich_nvmmtx);
   13075 
   13076 	return 0;
   13077 }
   13078 
   13079 static void
   13080 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13081 {
   13082 
   13083 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13084 		device_xname(sc->sc_dev), __func__));
   13085 	mutex_exit(sc->sc_ich_nvmmtx);
   13086 }
   13087 
   13088 static int
   13089 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13090 {
   13091 	int i = 0;
   13092 	uint32_t reg;
   13093 
   13094 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13095 		device_xname(sc->sc_dev), __func__));
   13096 
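          	/*
          	 * Set the MDIO SW ownership bit and read it back to confirm;
          	 * retry every 2ms, up to WM_MDIO_OWNERSHIP_TIMEOUT attempts.
          	 */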
   13097 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13098 	do {
   13099 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13100 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13101 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13102 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13103 			break;
   13104 		delay(2*1000);
   13105 		i++;
   13106 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13107 
   13108 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13109 		wm_put_hw_semaphore_82573(sc);
   13110 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13111 		    device_xname(sc->sc_dev));
   13112 		return -1;
   13113 	}
   13114 
   13115 	return 0;
   13116 }
   13117 
   13118 static void
   13119 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13120 {
   13121 	uint32_t reg;
   13122 
   13123 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13124 		device_xname(sc->sc_dev), __func__));
   13125 
   13126 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13127 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13128 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13129 }
   13130 
   13131 /*
   13132  * Management mode and power management related subroutines.
   13133  * BMC, AMT, suspend/resume and EEE.
   13134  */
   13135 
   13136 #ifdef WM_WOL
   13137 static int
   13138 wm_check_mng_mode(struct wm_softc *sc)
   13139 {
   13140 	int rv;
   13141 
   13142 	switch (sc->sc_type) {
   13143 	case WM_T_ICH8:
   13144 	case WM_T_ICH9:
   13145 	case WM_T_ICH10:
   13146 	case WM_T_PCH:
   13147 	case WM_T_PCH2:
   13148 	case WM_T_PCH_LPT:
   13149 	case WM_T_PCH_SPT:
   13150 		rv = wm_check_mng_mode_ich8lan(sc);
   13151 		break;
   13152 	case WM_T_82574:
   13153 	case WM_T_82583:
   13154 		rv = wm_check_mng_mode_82574(sc);
   13155 		break;
   13156 	case WM_T_82571:
   13157 	case WM_T_82572:
   13158 	case WM_T_82573:
   13159 	case WM_T_80003:
   13160 		rv = wm_check_mng_mode_generic(sc);
   13161 		break;
   13162 	default:
    13163 		/* nothing to do */
   13164 		rv = 0;
   13165 		break;
   13166 	}
   13167 
   13168 	return rv;
   13169 }
   13170 
   13171 static int
   13172 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13173 {
   13174 	uint32_t fwsm;
   13175 
   13176 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13177 
   13178 	if (((fwsm & FWSM_FW_VALID) != 0)
   13179 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13180 		return 1;
   13181 
   13182 	return 0;
   13183 }
   13184 
   13185 static int
   13186 wm_check_mng_mode_82574(struct wm_softc *sc)
   13187 {
   13188 	uint16_t data;
   13189 
   13190 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13191 
   13192 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13193 		return 1;
   13194 
   13195 	return 0;
   13196 }
   13197 
   13198 static int
   13199 wm_check_mng_mode_generic(struct wm_softc *sc)
   13200 {
   13201 	uint32_t fwsm;
   13202 
   13203 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13204 
   13205 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13206 		return 1;
   13207 
   13208 	return 0;
   13209 }
   13210 #endif /* WM_WOL */
   13211 
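          /*
           * Decide whether management packets are passed through to the host:
           * ASF firmware must be present and TCO receive enabled, and the chip
           * must report pass-through mode via the ARC subsystem, the
           * 82574/82583 NVM word, or SMBus enabled without ASF.
           */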
   13212 static int
   13213 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13214 {
   13215 	uint32_t manc, fwsm, factps;
   13216 
   13217 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13218 		return 0;
   13219 
   13220 	manc = CSR_READ(sc, WMREG_MANC);
   13221 
   13222 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13223 		device_xname(sc->sc_dev), manc));
   13224 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13225 		return 0;
   13226 
   13227 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13228 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13229 		factps = CSR_READ(sc, WMREG_FACTPS);
   13230 		if (((factps & FACTPS_MNGCG) == 0)
   13231 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13232 			return 1;
    13233 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13234 		uint16_t data;
   13235 
   13236 		factps = CSR_READ(sc, WMREG_FACTPS);
   13237 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13238 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13239 			device_xname(sc->sc_dev), factps, data));
   13240 		if (((factps & FACTPS_MNGCG) == 0)
   13241 		    && ((data & NVM_CFG2_MNGM_MASK)
   13242 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13243 			return 1;
   13244 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13245 	    && ((manc & MANC_ASF_EN) == 0))
   13246 		return 1;
   13247 
   13248 	return 0;
   13249 }
   13250 
   13251 static bool
   13252 wm_phy_resetisblocked(struct wm_softc *sc)
   13253 {
   13254 	bool blocked = false;
   13255 	uint32_t reg;
   13256 	int i = 0;
   13257 
   13258 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13259 		device_xname(sc->sc_dev), __func__));
   13260 
   13261 	switch (sc->sc_type) {
   13262 	case WM_T_ICH8:
   13263 	case WM_T_ICH9:
   13264 	case WM_T_ICH10:
   13265 	case WM_T_PCH:
   13266 	case WM_T_PCH2:
   13267 	case WM_T_PCH_LPT:
   13268 	case WM_T_PCH_SPT:
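          		/*
          		 * The PHY reset is blocked while FWSM_RSPCIPHY is clear;
          		 * poll up to 30 times, 10ms apart (~300ms), for firmware
          		 * to set it.
          		 */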
   13269 		do {
   13270 			reg = CSR_READ(sc, WMREG_FWSM);
   13271 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13272 				blocked = true;
   13273 				delay(10*1000);
   13274 				continue;
   13275 			}
   13276 			blocked = false;
   13277 		} while (blocked && (i++ < 30));
   13278 		return blocked;
   13280 	case WM_T_82571:
   13281 	case WM_T_82572:
   13282 	case WM_T_82573:
   13283 	case WM_T_82574:
   13284 	case WM_T_82583:
   13285 	case WM_T_80003:
   13286 		reg = CSR_READ(sc, WMREG_MANC);
   13287 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13288 			return true;
   13289 		else
   13290 			return false;
   13292 	default:
    13293 		/* Reset is never blocked on the other chip types */
   13294 		break;
   13295 	}
   13296 
   13297 	return false;
   13298 }
   13299 
   13300 static void
   13301 wm_get_hw_control(struct wm_softc *sc)
   13302 {
   13303 	uint32_t reg;
   13304 
   13305 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13306 		device_xname(sc->sc_dev), __func__));
   13307 
   13308 	if (sc->sc_type == WM_T_82573) {
   13309 		reg = CSR_READ(sc, WMREG_SWSM);
   13310 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13311 	} else if (sc->sc_type >= WM_T_82571) {
   13312 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13313 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13314 	}
   13315 }
   13316 
   13317 static void
   13318 wm_release_hw_control(struct wm_softc *sc)
   13319 {
   13320 	uint32_t reg;
   13321 
   13322 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13323 		device_xname(sc->sc_dev), __func__));
   13324 
   13325 	if (sc->sc_type == WM_T_82573) {
   13326 		reg = CSR_READ(sc, WMREG_SWSM);
   13327 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13328 	} else if (sc->sc_type >= WM_T_82571) {
   13329 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13330 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13331 	}
   13332 }
   13333 
   13334 static void
   13335 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13336 {
   13337 	uint32_t reg;
   13338 
   13339 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13340 		device_xname(sc->sc_dev), __func__));
   13341 
   13342 	if (sc->sc_type < WM_T_PCH2)
   13343 		return;
   13344 
   13345 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13346 
   13347 	if (gate)
   13348 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13349 	else
   13350 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13351 
   13352 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13353 }
   13354 
   13355 static void
   13356 wm_smbustopci(struct wm_softc *sc)
   13357 {
   13358 	uint32_t fwsm, reg;
   13359 	int rv = 0;
   13360 
   13361 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13362 		device_xname(sc->sc_dev), __func__));
   13363 
   13364 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13365 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13366 
   13367 	/* Disable ULP */
   13368 	wm_ulp_disable(sc);
   13369 
   13370 	/* Acquire PHY semaphore */
   13371 	sc->phy.acquire(sc);
   13372 
   13373 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13374 	switch (sc->sc_type) {
   13375 	case WM_T_PCH_LPT:
   13376 	case WM_T_PCH_SPT:
   13377 		if (wm_phy_is_accessible_pchlan(sc))
   13378 			break;
   13379 
   13380 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13381 		reg |= CTRL_EXT_FORCE_SMBUS;
   13382 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13383 #if 0
   13384 		/* XXX Isn't this required??? */
   13385 		CSR_WRITE_FLUSH(sc);
   13386 #endif
   13387 		delay(50 * 1000);
   13388 		/* FALLTHROUGH */
   13389 	case WM_T_PCH2:
   13390 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13391 			break;
   13392 		/* FALLTHROUGH */
   13393 	case WM_T_PCH:
    13394 		if ((sc->sc_type == WM_T_PCH) && ((fwsm & FWSM_FW_VALID) != 0))
    13395 			break;
   13397 
   13398 		if (wm_phy_resetisblocked(sc) == true) {
   13399 			printf("XXX reset is blocked(3)\n");
   13400 			break;
   13401 		}
   13402 
   13403 		wm_toggle_lanphypc_pch_lpt(sc);
   13404 
   13405 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13406 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13407 				break;
   13408 
   13409 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13410 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13411 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13412 
   13413 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13414 				break;
   13415 			rv = -1;
   13416 		}
   13417 		break;
   13418 	default:
   13419 		break;
   13420 	}
   13421 
   13422 	/* Release semaphore */
   13423 	sc->phy.release(sc);
   13424 
   13425 	if (rv == 0) {
   13426 		if (wm_phy_resetisblocked(sc)) {
   13427 			printf("XXX reset is blocked(4)\n");
   13428 			goto out;
   13429 		}
   13430 		wm_reset_phy(sc);
   13431 		if (wm_phy_resetisblocked(sc))
   13432 			printf("XXX reset is blocked(4)\n");
   13433 	}
   13434 
   13435 out:
   13436 	/*
   13437 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13438 	 */
   13439 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13440 		delay(10*1000);
   13441 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13442 	}
   13443 }
   13444 
   13445 static void
   13446 wm_init_manageability(struct wm_softc *sc)
   13447 {
   13448 
   13449 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13450 		device_xname(sc->sc_dev), __func__));
   13451 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13452 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13453 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13454 
   13455 		/* Disable hardware interception of ARP */
   13456 		manc &= ~MANC_ARP_EN;
   13457 
   13458 		/* Enable receiving management packets to the host */
   13459 		if (sc->sc_type >= WM_T_82571) {
   13460 			manc |= MANC_EN_MNG2HOST;
    13461 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13462 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13463 		}
   13464 
   13465 		CSR_WRITE(sc, WMREG_MANC, manc);
   13466 	}
   13467 }
   13468 
   13469 static void
   13470 wm_release_manageability(struct wm_softc *sc)
   13471 {
   13472 
   13473 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13474 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13475 
   13476 		manc |= MANC_ARP_EN;
   13477 		if (sc->sc_type >= WM_T_82571)
   13478 			manc &= ~MANC_EN_MNG2HOST;
   13479 
   13480 		CSR_WRITE(sc, WMREG_MANC, manc);
   13481 	}
   13482 }
   13483 
   13484 static void
   13485 wm_get_wakeup(struct wm_softc *sc)
   13486 {
   13487 
   13488 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13489 	switch (sc->sc_type) {
   13490 	case WM_T_82573:
   13491 	case WM_T_82583:
   13492 		sc->sc_flags |= WM_F_HAS_AMT;
   13493 		/* FALLTHROUGH */
   13494 	case WM_T_80003:
   13495 	case WM_T_82575:
   13496 	case WM_T_82576:
   13497 	case WM_T_82580:
   13498 	case WM_T_I350:
   13499 	case WM_T_I354:
   13500 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13501 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13502 		/* FALLTHROUGH */
   13503 	case WM_T_82541:
   13504 	case WM_T_82541_2:
   13505 	case WM_T_82547:
   13506 	case WM_T_82547_2:
   13507 	case WM_T_82571:
   13508 	case WM_T_82572:
   13509 	case WM_T_82574:
   13510 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13511 		break;
   13512 	case WM_T_ICH8:
   13513 	case WM_T_ICH9:
   13514 	case WM_T_ICH10:
   13515 	case WM_T_PCH:
   13516 	case WM_T_PCH2:
   13517 	case WM_T_PCH_LPT:
   13518 	case WM_T_PCH_SPT:
   13519 		sc->sc_flags |= WM_F_HAS_AMT;
   13520 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13521 		break;
   13522 	default:
   13523 		break;
   13524 	}
   13525 
   13526 	/* 1: HAS_MANAGE */
   13527 	if (wm_enable_mng_pass_thru(sc) != 0)
   13528 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13529 
    13530 	/*
    13531 	 * Note that the WOL flag is set only after the EEPROM reset code
    13532 	 * has run.
    13533 	 */
   13534 }
   13535 
    13536 /*
    13537  * Unconfigure Ultra Low Power mode.  Only for PCH_LPT and newer;
    13538  * some I217/I218 variants are excluded (see the device checks below).
    13539  */
   13540 static void
   13541 wm_ulp_disable(struct wm_softc *sc)
   13542 {
   13543 	uint32_t reg;
   13544 	int i = 0;
   13545 
   13546 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13547 		device_xname(sc->sc_dev), __func__));
   13548 	/* Exclude old devices */
   13549 	if ((sc->sc_type < WM_T_PCH_LPT)
   13550 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13551 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13552 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13553 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13554 		return;
   13555 
   13556 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13557 		/* Request ME un-configure ULP mode in the PHY */
   13558 		reg = CSR_READ(sc, WMREG_H2ME);
   13559 		reg &= ~H2ME_ULP;
   13560 		reg |= H2ME_ENFORCE_SETTINGS;
   13561 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13562 
   13563 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13564 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13565 			if (i++ == 30) {
   13566 				printf("%s timed out\n", __func__);
   13567 				return;
   13568 			}
   13569 			delay(10 * 1000);
   13570 		}
   13571 		reg = CSR_READ(sc, WMREG_H2ME);
   13572 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13573 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13574 
   13575 		return;
   13576 	}
   13577 
   13578 	/* Acquire semaphore */
   13579 	sc->phy.acquire(sc);
   13580 
   13581 	/* Toggle LANPHYPC */
   13582 	wm_toggle_lanphypc_pch_lpt(sc);
   13583 
   13584 	/* Unforce SMBus mode in PHY */
   13585 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13586 	if (reg == 0x0000 || reg == 0xffff) {
   13587 		uint32_t reg2;
   13588 
   13589 		printf("%s: Force SMBus first.\n", __func__);
   13590 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13591 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13592 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13593 		delay(50 * 1000);
   13594 
   13595 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13596 	}
   13597 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13598 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13599 
   13600 	/* Unforce SMBus mode in MAC */
   13601 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13602 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13603 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13604 
   13605 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13606 	reg |= HV_PM_CTRL_K1_ENA;
   13607 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13608 
   13609 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13610 	reg &= ~(I218_ULP_CONFIG1_IND
   13611 	    | I218_ULP_CONFIG1_STICKY_ULP
   13612 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13613 	    | I218_ULP_CONFIG1_WOL_HOST
   13614 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13615 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13616 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13617 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13618 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13619 	reg |= I218_ULP_CONFIG1_START;
   13620 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13621 
   13622 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13623 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13624 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13625 
   13626 	/* Release semaphore */
   13627 	sc->phy.release(sc);
   13628 	wm_gmii_reset(sc);
   13629 	delay(50 * 1000);
   13630 }
   13631 
   13632 /* WOL in the newer chipset interfaces (pchlan) */
   13633 static void
   13634 wm_enable_phy_wakeup(struct wm_softc *sc)
   13635 {
   13636 #if 0
   13637 	uint16_t preg;
   13638 
   13639 	/* Copy MAC RARs to PHY RARs */
   13640 
   13641 	/* Copy MAC MTA to PHY MTA */
   13642 
   13643 	/* Configure PHY Rx Control register */
   13644 
   13645 	/* Enable PHY wakeup in MAC register */
   13646 
   13647 	/* Configure and enable PHY wakeup in PHY registers */
   13648 
   13649 	/* Activate PHY wakeup */
   13650 
   13651 	/* XXX */
   13652 #endif
   13653 }
   13654 
   13655 /* Power down workaround on D3 */
   13656 static void
   13657 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13658 {
   13659 	uint32_t reg;
   13660 	int i;
   13661 
   13662 	for (i = 0; i < 2; i++) {
   13663 		/* Disable link */
   13664 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13665 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13666 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13667 
   13668 		/*
   13669 		 * Call gig speed drop workaround on Gig disable before
   13670 		 * accessing any PHY registers
   13671 		 */
   13672 		if (sc->sc_type == WM_T_ICH8)
   13673 			wm_gig_downshift_workaround_ich8lan(sc);
   13674 
   13675 		/* Write VR power-down enable */
   13676 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13677 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13678 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13679 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13680 
   13681 		/* Read it back and test */
   13682 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13683 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13684 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13685 			break;
   13686 
   13687 		/* Issue PHY reset and repeat at most one more time */
   13688 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13689 	}
   13690 }
   13691 
   13692 static void
   13693 wm_enable_wakeup(struct wm_softc *sc)
   13694 {
   13695 	uint32_t reg, pmreg;
   13696 	pcireg_t pmode;
   13697 
   13698 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13699 		device_xname(sc->sc_dev), __func__));
   13700 
   13701 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13702 		&pmreg, NULL) == 0)
   13703 		return;
   13704 
   13705 	/* Advertise the wakeup capability */
   13706 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13707 	    | CTRL_SWDPIN(3));
   13708 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13709 
   13710 	/* ICH workaround */
   13711 	switch (sc->sc_type) {
   13712 	case WM_T_ICH8:
   13713 	case WM_T_ICH9:
   13714 	case WM_T_ICH10:
   13715 	case WM_T_PCH:
   13716 	case WM_T_PCH2:
   13717 	case WM_T_PCH_LPT:
   13718 	case WM_T_PCH_SPT:
   13719 		/* Disable gig during WOL */
   13720 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13721 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13722 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13723 		if (sc->sc_type == WM_T_PCH)
   13724 			wm_gmii_reset(sc);
   13725 
   13726 		/* Power down workaround */
   13727 		if (sc->sc_phytype == WMPHY_82577) {
   13728 			struct mii_softc *child;
   13729 
   13730 			/* Assume that the PHY is copper */
   13731 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13732 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13733 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13734 				    (768 << 5) | 25, 0x0444); /* magic num */
   13735 		}
   13736 		break;
   13737 	default:
   13738 		break;
   13739 	}
   13740 
   13741 	/* Keep the laser running on fiber adapters */
   13742 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13743 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13744 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13745 		reg |= CTRL_EXT_SWDPIN(3);
   13746 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13747 	}
   13748 
   13749 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    13750 #if 0	/* For wake on multicast packets */
   13751 	reg |= WUFC_MC;
   13752 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13753 #endif
   13754 
   13755 	if (sc->sc_type >= WM_T_PCH)
   13756 		wm_enable_phy_wakeup(sc);
   13757 	else {
   13758 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13759 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13760 	}
   13761 
   13762 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13763 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13764 		|| (sc->sc_type == WM_T_PCH2))
   13765 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13766 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13767 
   13768 	/* Request PME */
   13769 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13770 #if 0
   13771 	/* Disable WOL */
   13772 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13773 #else
   13774 	/* For WOL */
   13775 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13776 #endif
   13777 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13778 }
   13779 
    13780 /* LPLU (Low Power Link Up) */
   13781 
   13782 static void
   13783 wm_lplu_d0_disable(struct wm_softc *sc)
   13784 {
   13785 	struct mii_data *mii = &sc->sc_mii;
   13786 	uint32_t reg;
   13787 
   13788 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13789 		device_xname(sc->sc_dev), __func__));
   13790 
   13791 	if (sc->sc_phytype == WMPHY_IFE)
   13792 		return;
   13793 
   13794 	switch (sc->sc_type) {
   13795 	case WM_T_82571:
   13796 	case WM_T_82572:
   13797 	case WM_T_82573:
   13798 	case WM_T_82575:
   13799 	case WM_T_82576:
   13800 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13801 		reg &= ~PMR_D0_LPLU;
   13802 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13803 		break;
   13804 	case WM_T_82580:
   13805 	case WM_T_I350:
   13806 	case WM_T_I210:
   13807 	case WM_T_I211:
   13808 		reg = CSR_READ(sc, WMREG_PHPM);
   13809 		reg &= ~PHPM_D0A_LPLU;
   13810 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13811 		break;
   13812 	case WM_T_82574:
   13813 	case WM_T_82583:
   13814 	case WM_T_ICH8:
   13815 	case WM_T_ICH9:
   13816 	case WM_T_ICH10:
   13817 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13818 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13819 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13820 		CSR_WRITE_FLUSH(sc);
   13821 		break;
   13822 	case WM_T_PCH:
   13823 	case WM_T_PCH2:
   13824 	case WM_T_PCH_LPT:
   13825 	case WM_T_PCH_SPT:
   13826 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13827 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13828 		if (wm_phy_resetisblocked(sc) == false)
   13829 			reg |= HV_OEM_BITS_ANEGNOW;
   13830 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13831 		break;
   13832 	default:
   13833 		break;
   13834 	}
   13835 }
   13836 
   13837 /* EEE */
   13838 
   13839 static void
   13840 wm_set_eee_i350(struct wm_softc *sc)
   13841 {
   13842 	uint32_t ipcnfg, eeer;
   13843 
   13844 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13845 	eeer = CSR_READ(sc, WMREG_EEER);
   13846 
   13847 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13848 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13849 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13850 		    | EEER_LPI_FC);
   13851 	} else {
   13852 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13853 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13854 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13855 		    | EEER_LPI_FC);
   13856 	}
   13857 
   13858 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13859 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13860 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13861 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13862 }
   13863 
   13864 /*
   13865  * Workarounds (mainly PHY related).
   13866  * Basically, PHY's workarounds are in the PHY drivers.
   13867  */
   13868 
   13869 /* Work-around for 82566 Kumeran PCS lock loss */
   13870 static void
   13871 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13872 {
   13873 	struct mii_data *mii = &sc->sc_mii;
   13874 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13875 	int i;
   13876 	int reg;
   13877 
   13878 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13879 		device_xname(sc->sc_dev), __func__));
   13880 
   13881 	/* If the link is not up, do nothing */
   13882 	if ((status & STATUS_LU) == 0)
   13883 		return;
   13884 
    13885 	/* Nothing to do if the link speed is other than 1Gbps */
   13886 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13887 		return;
   13888 
   13889 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13890 	for (i = 0; i < 10; i++) {
   13891 		/* read twice */
   13892 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13893 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13894 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13895 			goto out;	/* GOOD! */
   13896 
   13897 		/* Reset the PHY */
   13898 		wm_reset_phy(sc);
   13899 		delay(5*1000);
   13900 	}
   13901 
   13902 	/* Disable GigE link negotiation */
   13903 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13904 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13905 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13906 
   13907 	/*
   13908 	 * Call gig speed drop workaround on Gig disable before accessing
   13909 	 * any PHY registers.
   13910 	 */
   13911 	wm_gig_downshift_workaround_ich8lan(sc);
   13912 
   13913 out:
   13914 	return;
   13915 }
   13916 
   13917 /* WOL from S5 stops working */
   13918 static void
   13919 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13920 {
   13921 	uint16_t kmreg;
   13922 
   13923 	/* Only for igp3 */
   13924 	if (sc->sc_phytype == WMPHY_IGP_3) {
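          		/*
          		 * Setting and then clearing the NELPBK (presumably
          		 * near-end loopback) diagnostic bit is the workaround.
          		 */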
   13925 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   13926 			return;
   13927 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   13928 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   13929 			return;
   13930 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13931 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   13932 	}
   13933 }
   13934 
   13935 /*
   13936  * Workaround for pch's PHYs
   13937  * XXX should be moved to new PHY driver?
   13938  */
   13939 static void
   13940 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13941 {
   13942 
   13943 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13944 		device_xname(sc->sc_dev), __func__));
   13945 	KASSERT(sc->sc_type == WM_T_PCH);
   13946 
   13947 	if (sc->sc_phytype == WMPHY_82577)
   13948 		wm_set_mdio_slow_mode_hv(sc);
   13949 
   13950 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13951 
   13952 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   13953 
   13954 	/* 82578 */
   13955 	if (sc->sc_phytype == WMPHY_82578) {
   13956 		struct mii_softc *child;
   13957 
   13958 		/*
   13959 		 * Return registers to default by doing a soft reset then
   13960 		 * writing 0x3140 to the control register
   13961 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13962 		 */
   13963 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13964 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13965 			PHY_RESET(child);
   13966 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13967 			    0x3140);
   13968 		}
   13969 	}
   13970 
   13971 	/* Select page 0 */
   13972 	sc->phy.acquire(sc);
   13973 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13974 	sc->phy.release(sc);
   13975 
    13976 	/*
    13977 	 * Configure the K1 Si workaround during PHY reset, assuming link
    13978 	 * is up, so that K1 is disabled while the link runs at 1Gbps.
    13979 	 */
   13980 	wm_k1_gig_workaround_hv(sc, 1);
   13981 }
   13982 
   13983 static void
   13984 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13985 {
   13986 
   13987 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13988 		device_xname(sc->sc_dev), __func__));
   13989 	KASSERT(sc->sc_type == WM_T_PCH2);
   13990 
   13991 	wm_set_mdio_slow_mode_hv(sc);
   13992 }
   13993 
   13994 static int
   13995 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13996 {
   13997 	int k1_enable = sc->sc_nvm_k1_enabled;
   13998 
   13999 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14000 		device_xname(sc->sc_dev), __func__));
   14001 
   14002 	if (sc->phy.acquire(sc) != 0)
   14003 		return -1;
   14004 
   14005 	if (link) {
   14006 		k1_enable = 0;
   14007 
   14008 		/* Link stall fix for link up */
   14009 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   14010 	} else {
   14011 		/* Link stall fix for link down */
   14012 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   14013 	}
   14014 
   14015 	wm_configure_k1_ich8lan(sc, k1_enable);
   14016 	sc->phy.release(sc);
   14017 
   14018 	return 0;
   14019 }
   14020 
   14021 static void
   14022 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14023 {
   14024 	uint32_t reg;
   14025 
   14026 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14027 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14028 	    reg | HV_KMRN_MDIO_SLOW);
   14029 }
   14030 
   14031 static void
   14032 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14033 {
   14034 	uint32_t ctrl, ctrl_ext, tmp;
   14035 	uint16_t kmreg;
   14036 	int rv;
   14037 
   14038 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14039 	if (rv != 0)
   14040 		return;
   14041 
   14042 	if (k1_enable)
   14043 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14044 	else
   14045 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14046 
   14047 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14048 	if (rv != 0)
   14049 		return;
   14050 
   14051 	delay(20);
   14052 
   14053 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14054 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14055 
   14056 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14057 	tmp |= CTRL_FRCSPD;
   14058 
   14059 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14060 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14061 	CSR_WRITE_FLUSH(sc);
   14062 	delay(20);
   14063 
   14064 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14065 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14066 	CSR_WRITE_FLUSH(sc);
   14067 	delay(20);
   14068 
   14069 	return;
   14070 }
   14071 
    14072 /* Special case - the 82575 needs manual init after reset ... */
   14073 static void
   14074 wm_reset_init_script_82575(struct wm_softc *sc)
   14075 {
    14076 	/*
    14077 	 * Remark: this is untested code - we have no board without EEPROM.
    14078 	 * The setup is the same as in the FreeBSD driver for the i82575.
    14079 	 */
   14080 
   14081 	/* SerDes configuration via SERDESCTRL */
   14082 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14083 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14084 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14085 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14086 
   14087 	/* CCM configuration via CCMCTL register */
   14088 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14089 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14090 
   14091 	/* PCIe lanes configuration */
   14092 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14093 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14094 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14095 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14096 
   14097 	/* PCIe PLL Configuration */
   14098 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14099 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14100 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14101 }
   14102 
   14103 static void
   14104 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14105 {
   14106 	uint32_t reg;
   14107 	uint16_t nvmword;
   14108 	int rv;
   14109 
   14110 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14111 		return;
   14112 
   14113 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14114 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14115 	if (rv != 0) {
   14116 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14117 		    __func__);
   14118 		return;
   14119 	}
   14120 
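          	/*
          	 * Mirror the NVM CFG3 PORTA bits into MDICNFG: DEST appears to
          	 * select the external MDIO interface, COM_MDIO the shared one.
          	 */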
   14121 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14122 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14123 		reg |= MDICNFG_DEST;
   14124 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14125 		reg |= MDICNFG_COM_MDIO;
   14126 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14127 }
   14128 
   14129 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14130 
   14131 static bool
   14132 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14133 {
   14134 	int i;
   14135 	uint32_t reg;
   14136 	uint16_t id1, id2;
   14137 
   14138 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14139 		device_xname(sc->sc_dev), __func__));
   14140 	id1 = id2 = 0xffff;
   14141 	for (i = 0; i < 2; i++) {
   14142 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14143 		if (MII_INVALIDID(id1))
   14144 			continue;
   14145 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14146 		if (MII_INVALIDID(id2))
   14147 			continue;
   14148 		break;
   14149 	}
    14150 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    14151 		goto out;
   14153 
   14154 	if (sc->sc_type < WM_T_PCH_LPT) {
   14155 		sc->phy.release(sc);
   14156 		wm_set_mdio_slow_mode_hv(sc);
   14157 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14158 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14159 		sc->phy.acquire(sc);
   14160 	}
   14161 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14162 		printf("XXX return with false\n");
   14163 		return false;
   14164 	}
   14165 out:
   14166 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14167 		/* Only unforce SMBus if ME is not active */
   14168 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14169 			/* Unforce SMBus mode in PHY */
   14170 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14171 			    CV_SMB_CTRL);
   14172 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14173 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14174 			    CV_SMB_CTRL, reg);
   14175 
   14176 			/* Unforce SMBus mode in MAC */
   14177 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14178 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14179 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14180 		}
   14181 	}
   14182 	return true;
   14183 }
   14184 
   14185 static void
   14186 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14187 {
   14188 	uint32_t reg;
   14189 	int i;
   14190 
   14191 	/* Set PHY Config Counter to 50msec */
   14192 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14193 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14194 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14195 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14196 
   14197 	/* Toggle LANPHYPC */
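          	/*
          	 * Drive the LANPHYPC pin low by asserting the override bit
          	 * with the value bit cleared, then release the override so
          	 * the pin returns to hardware control.
          	 */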
   14198 	reg = CSR_READ(sc, WMREG_CTRL);
   14199 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14200 	reg &= ~CTRL_LANPHYPC_VALUE;
   14201 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14202 	CSR_WRITE_FLUSH(sc);
   14203 	delay(1000);
   14204 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14205 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14206 	CSR_WRITE_FLUSH(sc);
   14207 
   14208 	if (sc->sc_type < WM_T_PCH_LPT)
   14209 		delay(50 * 1000);
   14210 	else {
   14211 		i = 20;
   14212 
   14213 		do {
   14214 			delay(5 * 1000);
   14215 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14216 		    && i--);
   14217 
   14218 		delay(30 * 1000);
   14219 	}
   14220 }
   14221 
   14222 static int
   14223 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14224 {
   14225 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14226 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14227 	uint32_t rxa;
   14228 	uint16_t scale = 0, lat_enc = 0;
   14229 	int32_t obff_hwm = 0;
   14230 	int64_t lat_ns, value;
   14231 
   14232 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14233 		device_xname(sc->sc_dev), __func__));
   14234 
   14235 	if (link) {
   14236 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14237 		uint32_t status;
   14238 		uint16_t speed;
   14239 		pcireg_t preg;
   14240 
   14241 		status = CSR_READ(sc, WMREG_STATUS);
   14242 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14243 		case STATUS_SPEED_10:
   14244 			speed = 10;
   14245 			break;
   14246 		case STATUS_SPEED_100:
   14247 			speed = 100;
   14248 			break;
   14249 		case STATUS_SPEED_1000:
   14250 			speed = 1000;
   14251 			break;
   14252 		default:
   14253 			device_printf(sc->sc_dev, "Unknown speed "
   14254 			    "(status = %08x)\n", status);
   14255 			return -1;
   14256 		}
   14257 
   14258 		/* Rx Packet Buffer Allocation size (KB) */
   14259 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14260 
   14261 		/*
   14262 		 * Determine the maximum latency tolerated by the device.
   14263 		 *
   14264 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14265 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14266 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14267 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14268 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14269 		 */
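          		/*
          		 * Units below: rxa is in KB (x1024 for bytes), less two
          		 * maximum-sized frames; x8 for bits and x1000 so that
          		 * dividing by the link speed in Mb/s yields nanoseconds.
          		 */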
   14270 		lat_ns = ((int64_t)rxa * 1024 -
   14271 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14272 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14273 		if (lat_ns < 0)
   14274 			lat_ns = 0;
   14275 		else
   14276 			lat_ns /= speed;
   14277 		value = lat_ns;
   14278 
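          		/*
          		 * Illustrative example of the encoding loop below:
          		 * lat_ns = 102400 becomes value = 100 with scale = 2
          		 * (2^10 ns units), since 100 * 2^10 = 102400.
          		 */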
   14279 		while (value > LTRV_VALUE) {
    14280 			scale++;
   14281 			value = howmany(value, __BIT(5));
   14282 		}
   14283 		if (scale > LTRV_SCALE_MAX) {
   14284 			printf("%s: Invalid LTR latency scale %d\n",
   14285 			    device_xname(sc->sc_dev), scale);
   14286 			return -1;
   14287 		}
   14288 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14289 
   14290 		/* Determine the maximum latency tolerated by the platform */
   14291 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14292 		    WM_PCI_LTR_CAP_LPT);
   14293 		max_snoop = preg & 0xffff;
   14294 		max_nosnoop = preg >> 16;
   14295 
   14296 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14297 
   14298 		if (lat_enc > max_ltr_enc) {
   14299 			lat_enc = max_ltr_enc;
   14300 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14301 			    * PCI_LTR_SCALETONS(
   14302 				    __SHIFTOUT(lat_enc,
   14303 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14304 		}
   14305 
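          		/*
          		 * Convert the final latency back into the buffer space
          		 * (KB, approximating 1KB as 1000 bytes) consumed while
          		 * waiting; the rest of rxa becomes the OBFF high water
          		 * mark.
          		 */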
   14306 		if (lat_ns) {
   14307 			lat_ns *= speed * 1000;
   14308 			lat_ns /= 8;
   14309 			lat_ns /= 1000000000;
   14310 			obff_hwm = (int32_t)(rxa - lat_ns);
   14311 		}
   14312 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    14313 			device_printf(sc->sc_dev, "Invalid high water mark %d "
    14314 			    "(rxa = %d, lat_ns = %d)\n",
   14315 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14316 			return -1;
   14317 		}
   14318 	}
    14319 	/* Snoop and No-Snoop latencies are set to the same value */
   14320 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14321 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14322 
   14323 	/* Set OBFF high water mark */
   14324 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14325 	reg |= obff_hwm;
   14326 	CSR_WRITE(sc, WMREG_SVT, reg);
   14327 
   14328 	/* Enable OBFF */
   14329 	reg = CSR_READ(sc, WMREG_SVCR);
   14330 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14331 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14332 
   14333 	return 0;
   14334 }
   14335 
   14336 /*
   14337  * I210 Errata 25 and I211 Errata 10
   14338  * Slow System Clock.
   14339  */
   14340 static void
   14341 wm_pll_workaround_i210(struct wm_softc *sc)
   14342 {
   14343 	uint32_t mdicnfg, wuc;
   14344 	uint32_t reg;
   14345 	pcireg_t pcireg;
   14346 	uint32_t pmreg;
   14347 	uint16_t nvmword, tmp_nvmword;
   14348 	int phyval;
   14349 	bool wa_done = false;
   14350 	int i;
   14351 
   14352 	/* Save WUC and MDICNFG registers */
   14353 	wuc = CSR_READ(sc, WMREG_WUC);
   14354 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14355 
   14356 	reg = mdicnfg & ~MDICNFG_DEST;
   14357 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14358 
   14359 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14360 		nvmword = INVM_DEFAULT_AL;
   14361 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14362 
   14363 	/* Get Power Management cap offset */
   14364 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14365 		&pmreg, NULL) == 0)
   14366 		return;
   14367 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14368 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14369 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14370 
   14371 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14372 			break; /* OK */
   14373 		}
   14374 
   14375 		wa_done = true;
   14376 		/* Directly reset the internal PHY */
   14377 		reg = CSR_READ(sc, WMREG_CTRL);
   14378 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14379 
   14380 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14381 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14382 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14383 
   14384 		CSR_WRITE(sc, WMREG_WUC, 0);
   14385 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14386 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14387 
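          		/*
          		 * Cycle the function through D3hot and back to D0,
          		 * presumably so the PHY reloads using the overridden
          		 * autoload word written above.
          		 */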
   14388 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14389 		    pmreg + PCI_PMCSR);
   14390 		pcireg |= PCI_PMCSR_STATE_D3;
   14391 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14392 		    pmreg + PCI_PMCSR, pcireg);
   14393 		delay(1000);
   14394 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14395 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14396 		    pmreg + PCI_PMCSR, pcireg);
   14397 
   14398 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14399 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14400 
   14401 		/* Restore WUC register */
   14402 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14403 	}
   14404 
   14405 	/* Restore MDICNFG setting */
   14406 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14407 	if (wa_done)
   14408 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14409 }
   14410 
   14411 static void
   14412 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14413 {
   14414 	uint32_t reg;
   14415 
   14416 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14417 		device_xname(sc->sc_dev), __func__));
   14418 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14419 
   14420 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14421 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14422 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14423 
   14424 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14425 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14426 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14427 }
   14428